Merge pull request #17756 from reitermarkus/concurrent-downloads
Implement concurrent downloads in `brew fetch`.
This commit is contained in:
commit
c8e8aa5600
@ -78,6 +78,7 @@ end
|
|||||||
|
|
||||||
# vendored gems (no group)
|
# vendored gems (no group)
|
||||||
gem "addressable"
|
gem "addressable"
|
||||||
|
gem "concurrent-ruby"
|
||||||
gem "patchelf"
|
gem "patchelf"
|
||||||
gem "plist"
|
gem "plist"
|
||||||
gem "ruby-macho"
|
gem "ruby-macho"
|
||||||
|
|||||||
@ -9,6 +9,7 @@ GEM
|
|||||||
coderay (1.1.3)
|
coderay (1.1.3)
|
||||||
commander (5.0.0)
|
commander (5.0.0)
|
||||||
highline (~> 3.0.0)
|
highline (~> 3.0.0)
|
||||||
|
concurrent-ruby (1.3.4)
|
||||||
diff-lcs (1.5.1)
|
diff-lcs (1.5.1)
|
||||||
docile (1.4.1)
|
docile (1.4.1)
|
||||||
elftools (1.3.1)
|
elftools (1.3.1)
|
||||||
@ -161,6 +162,7 @@ PLATFORMS
|
|||||||
|
|
||||||
DEPENDENCIES
|
DEPENDENCIES
|
||||||
addressable
|
addressable
|
||||||
|
concurrent-ruby
|
||||||
json_schemer
|
json_schemer
|
||||||
kramdown
|
kramdown
|
||||||
method_source
|
method_source
|
||||||
|
|||||||
@ -12,7 +12,9 @@ module Homebrew
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
class Download < Downloadable
|
class Download
|
||||||
|
include Downloadable
|
||||||
|
|
||||||
sig {
|
sig {
|
||||||
params(
|
params(
|
||||||
url: String,
|
url: String,
|
||||||
@ -29,6 +31,21 @@ module Homebrew
|
|||||||
@cache = cache
|
@cache = cache
|
||||||
end
|
end
|
||||||
|
|
||||||
|
sig { override.returns(API::DownloadStrategy) }
|
||||||
|
def downloader
|
||||||
|
T.cast(super, API::DownloadStrategy)
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def name
|
||||||
|
download_name
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def download_type
|
||||||
|
"API source"
|
||||||
|
end
|
||||||
|
|
||||||
sig { override.returns(Pathname) }
|
sig { override.returns(Pathname) }
|
||||||
def cache
|
def cache
|
||||||
@cache || super
|
@cache || super
|
||||||
@ -36,7 +53,7 @@ module Homebrew
|
|||||||
|
|
||||||
sig { returns(Pathname) }
|
sig { returns(Pathname) }
|
||||||
def symlink_location
|
def symlink_location
|
||||||
T.cast(downloader, API::DownloadStrategy).symlink_location
|
downloader.symlink_location
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@ -8,7 +8,9 @@ require "cask/quarantine"
|
|||||||
|
|
||||||
module Cask
|
module Cask
|
||||||
# A download corresponding to a {Cask}.
|
# A download corresponding to a {Cask}.
|
||||||
class Download < ::Downloadable
|
class Download
|
||||||
|
include Downloadable
|
||||||
|
|
||||||
include Context
|
include Context
|
||||||
|
|
||||||
attr_reader :cask
|
attr_reader :cask
|
||||||
@ -20,6 +22,11 @@ module Cask
|
|||||||
@quarantine = quarantine
|
@quarantine = quarantine
|
||||||
end
|
end
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def name
|
||||||
|
cask.token
|
||||||
|
end
|
||||||
|
|
||||||
sig { override.returns(T.nilable(::URL)) }
|
sig { override.returns(T.nilable(::URL)) }
|
||||||
def url
|
def url
|
||||||
return if cask.url.nil?
|
return if cask.url.nil?
|
||||||
@ -88,6 +95,11 @@ module Cask
|
|||||||
cask.token
|
cask.token
|
||||||
end
|
end
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def download_type
|
||||||
|
"cask"
|
||||||
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def quarantine(path)
|
def quarantine(path)
|
||||||
|
|||||||
@ -5,6 +5,7 @@ require "abstract_command"
|
|||||||
require "formula"
|
require "formula"
|
||||||
require "fetch"
|
require "fetch"
|
||||||
require "cask/download"
|
require "cask/download"
|
||||||
|
require "retryable_download"
|
||||||
|
|
||||||
module Homebrew
|
module Homebrew
|
||||||
module Cmd
|
module Cmd
|
||||||
@ -25,6 +26,7 @@ module Homebrew
|
|||||||
"(Pass `all` to download for all architectures.)"
|
"(Pass `all` to download for all architectures.)"
|
||||||
flag "--bottle-tag=",
|
flag "--bottle-tag=",
|
||||||
description: "Download a bottle for given tag."
|
description: "Download a bottle for given tag."
|
||||||
|
flag "--concurrency=", description: "Number of concurrent downloads.", hidden: true
|
||||||
switch "--HEAD",
|
switch "--HEAD",
|
||||||
description: "Fetch HEAD version instead of stable version."
|
description: "Fetch HEAD version instead of stable version."
|
||||||
switch "-f", "--force",
|
switch "-f", "--force",
|
||||||
@ -67,6 +69,49 @@ module Homebrew
|
|||||||
named_args [:formula, :cask], min: 1
|
named_args [:formula, :cask], min: 1
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def concurrency
|
||||||
|
@concurrency ||= args.concurrency&.to_i || 1
|
||||||
|
end
|
||||||
|
|
||||||
|
def download_queue
|
||||||
|
@download_queue ||= begin
|
||||||
|
require "download_queue"
|
||||||
|
DownloadQueue.new(concurrency)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
class Spinner
|
||||||
|
FRAMES = [
|
||||||
|
"⠋",
|
||||||
|
"⠙",
|
||||||
|
"⠚",
|
||||||
|
"⠞",
|
||||||
|
"⠖",
|
||||||
|
"⠦",
|
||||||
|
"⠴",
|
||||||
|
"⠲",
|
||||||
|
"⠳",
|
||||||
|
"⠓",
|
||||||
|
].freeze
|
||||||
|
|
||||||
|
sig { void }
|
||||||
|
def initialize
|
||||||
|
@start = Time.now
|
||||||
|
@i = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { returns(String) }
|
||||||
|
def to_s
|
||||||
|
now = Time.now
|
||||||
|
if @start + 0.1 < now
|
||||||
|
@start = now
|
||||||
|
@i = (@i + 1) % FRAMES.count
|
||||||
|
end
|
||||||
|
|
||||||
|
FRAMES.fetch(@i)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
sig { override.void }
|
sig { override.void }
|
||||||
def run
|
def run
|
||||||
Formulary.enable_factory_cache!
|
Formulary.enable_factory_cache!
|
||||||
@ -125,13 +170,10 @@ module Homebrew
|
|||||||
next
|
next
|
||||||
end
|
end
|
||||||
|
|
||||||
begin
|
if (manifest_resource = bottle.github_packages_manifest_resource)
|
||||||
bottle.fetch_tab
|
fetch_downloadable(manifest_resource)
|
||||||
rescue DownloadError
|
|
||||||
retry if retry_fetch?(bottle)
|
|
||||||
raise
|
|
||||||
end
|
end
|
||||||
fetch_formula(bottle)
|
fetch_downloadable(bottle)
|
||||||
rescue Interrupt
|
rescue Interrupt
|
||||||
raise
|
raise
|
||||||
rescue => e
|
rescue => e
|
||||||
@ -147,14 +189,14 @@ module Homebrew
|
|||||||
|
|
||||||
next if fetched_bottle
|
next if fetched_bottle
|
||||||
|
|
||||||
fetch_formula(formula)
|
fetch_downloadable(formula.resource)
|
||||||
|
|
||||||
formula.resources.each do |r|
|
formula.resources.each do |r|
|
||||||
fetch_resource(r)
|
fetch_downloadable(r)
|
||||||
r.patches.each { |p| fetch_patch(p) if p.external? }
|
r.patches.each { |patch| fetch_downloadable(patch.resource) if patch.external? }
|
||||||
end
|
end
|
||||||
|
|
||||||
formula.patchlist.each { |p| fetch_patch(p) if p.external? }
|
formula.patchlist.each { |patch| fetch_downloadable(patch.resource) if patch.external? }
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
else
|
else
|
||||||
@ -176,81 +218,119 @@ module Homebrew
|
|||||||
quarantine = true if quarantine.nil?
|
quarantine = true if quarantine.nil?
|
||||||
|
|
||||||
download = Cask::Download.new(cask, quarantine:)
|
download = Cask::Download.new(cask, quarantine:)
|
||||||
fetch_cask(download)
|
fetch_downloadable(download)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
if concurrency == 1
|
||||||
|
downloads.each do |downloadable, promise|
|
||||||
|
promise.wait!
|
||||||
|
rescue ChecksumMismatchError => e
|
||||||
|
opoo "#{downloadable.download_type.capitalize} reports different checksum: #{e.expected}"
|
||||||
|
Homebrew.failed = true if downloadable.is_a?(Resource::Patch)
|
||||||
|
end
|
||||||
|
else
|
||||||
|
spinner = Spinner.new
|
||||||
|
remaining_downloads = downloads.dup
|
||||||
|
previous_pending_line_count = 0
|
||||||
|
|
||||||
|
begin
|
||||||
|
$stdout.print Tty.hide_cursor
|
||||||
|
$stdout.flush
|
||||||
|
|
||||||
|
output_message = lambda do |downloadable, future|
|
||||||
|
status = case future.state
|
||||||
|
when :fulfilled
|
||||||
|
"#{Tty.green}✔︎#{Tty.reset}"
|
||||||
|
when :rejected
|
||||||
|
"#{Tty.red}✘#{Tty.reset}"
|
||||||
|
when :pending, :processing
|
||||||
|
"#{Tty.blue}#{spinner}#{Tty.reset}"
|
||||||
|
else
|
||||||
|
raise future.state.to_s
|
||||||
|
end
|
||||||
|
|
||||||
|
message = "#{downloadable.download_type.capitalize} #{downloadable.name}"
|
||||||
|
$stdout.puts "#{status} #{message}"
|
||||||
|
$stdout.flush
|
||||||
|
|
||||||
|
if future.rejected? && (e = future.reason).is_a?(ChecksumMismatchError)
|
||||||
|
opoo "#{downloadable.download_type.capitalize} reports different checksum: #{e.expected}"
|
||||||
|
Homebrew.failed = true if downloadable.is_a?(Resource::Patch)
|
||||||
|
next 2
|
||||||
|
end
|
||||||
|
|
||||||
|
1
|
||||||
|
end
|
||||||
|
|
||||||
|
until remaining_downloads.empty?
|
||||||
|
begin
|
||||||
|
finished_states = [:fulfilled, :rejected]
|
||||||
|
|
||||||
|
finished_downloads, remaining_downloads = remaining_downloads.partition do |_, future|
|
||||||
|
finished_states.include?(future.state)
|
||||||
|
end
|
||||||
|
|
||||||
|
finished_downloads.each do |downloadable, future|
|
||||||
|
previous_pending_line_count -= 1
|
||||||
|
$stdout.print Tty.clear_to_end
|
||||||
|
$stdout.flush
|
||||||
|
output_message.call(downloadable, future)
|
||||||
|
end
|
||||||
|
|
||||||
|
previous_pending_line_count = 0
|
||||||
|
remaining_downloads.each do |downloadable, future|
|
||||||
|
# FIXME: Allow printing full terminal height.
|
||||||
|
break if previous_pending_line_count >= [concurrency, (Tty.height - 1)].min
|
||||||
|
|
||||||
|
$stdout.print Tty.clear_to_end
|
||||||
|
$stdout.flush
|
||||||
|
previous_pending_line_count += output_message.call(downloadable, future)
|
||||||
|
end
|
||||||
|
|
||||||
|
if previous_pending_line_count.positive?
|
||||||
|
$stdout.print Tty.move_cursor_up_beginning(previous_pending_line_count)
|
||||||
|
$stdout.flush
|
||||||
|
end
|
||||||
|
|
||||||
|
sleep 0.05
|
||||||
|
rescue Interrupt
|
||||||
|
remaining_downloads.each do |_, future|
|
||||||
|
# FIXME: Implement cancellation of running downloads.
|
||||||
|
end
|
||||||
|
|
||||||
|
download_queue.cancel
|
||||||
|
|
||||||
|
if previous_pending_line_count.positive?
|
||||||
|
$stdout.print Tty.move_cursor_down(previous_pending_line_count - 1)
|
||||||
|
$stdout.flush
|
||||||
|
end
|
||||||
|
|
||||||
|
raise
|
||||||
|
end
|
||||||
|
end
|
||||||
|
ensure
|
||||||
|
$stdout.print Tty.show_cursor
|
||||||
|
$stdout.flush
|
||||||
|
end
|
||||||
|
end
|
||||||
|
ensure
|
||||||
|
download_queue.shutdown
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|
||||||
def fetch_resource(resource)
|
def downloads
|
||||||
puts "Resource: #{resource.name}"
|
@downloads ||= {}
|
||||||
fetch_fetchable resource
|
|
||||||
rescue ChecksumMismatchError => e
|
|
||||||
retry if retry_fetch?(resource)
|
|
||||||
opoo "Resource #{resource.name} reports different sha256: #{e.expected}"
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def fetch_formula(formula)
|
def fetch_downloadable(downloadable)
|
||||||
fetch_fetchable(formula)
|
downloads[downloadable] ||= begin
|
||||||
rescue ChecksumMismatchError => e
|
tries = args.retry? ? {} : { tries: 1 }
|
||||||
retry if retry_fetch?(formula)
|
download_queue.enqueue(RetryableDownload.new(downloadable, **tries), force: args.force?)
|
||||||
opoo "Formula reports different sha256: #{e.expected}"
|
end
|
||||||
end
|
|
||||||
|
|
||||||
def fetch_cask(cask_download)
|
|
||||||
fetch_fetchable(cask_download)
|
|
||||||
rescue ChecksumMismatchError => e
|
|
||||||
retry if retry_fetch?(cask_download)
|
|
||||||
opoo "Cask reports different sha256: #{e.expected}"
|
|
||||||
end
|
|
||||||
|
|
||||||
def fetch_patch(patch)
|
|
||||||
fetch_fetchable(patch)
|
|
||||||
rescue ChecksumMismatchError => e
|
|
||||||
opoo "Patch reports different sha256: #{e.expected}"
|
|
||||||
Homebrew.failed = true
|
|
||||||
end
|
|
||||||
|
|
||||||
def retry_fetch?(formula)
|
|
||||||
@fetch_tries ||= Hash.new { |h, k| h[k] = 1 }
|
|
||||||
if args.retry? && (@fetch_tries[formula] < FETCH_MAX_TRIES)
|
|
||||||
wait = 2 ** @fetch_tries[formula]
|
|
||||||
remaining = FETCH_MAX_TRIES - @fetch_tries[formula]
|
|
||||||
what = Utils.pluralize("tr", remaining, plural: "ies", singular: "y")
|
|
||||||
|
|
||||||
ohai "Retrying download in #{wait}s... (#{remaining} #{what} left)"
|
|
||||||
sleep wait
|
|
||||||
|
|
||||||
formula.clear_cache
|
|
||||||
@fetch_tries[formula] += 1
|
|
||||||
true
|
|
||||||
else
|
|
||||||
Homebrew.failed = true
|
|
||||||
false
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
def fetch_fetchable(formula)
|
|
||||||
formula.clear_cache if args.force?
|
|
||||||
|
|
||||||
already_fetched = formula.cached_download.exist?
|
|
||||||
|
|
||||||
begin
|
|
||||||
download = formula.fetch(verify_download_integrity: false)
|
|
||||||
rescue DownloadError
|
|
||||||
retry if retry_fetch?(formula)
|
|
||||||
raise
|
|
||||||
end
|
|
||||||
|
|
||||||
return unless download.file?
|
|
||||||
|
|
||||||
puts "Downloaded to: #{download}" unless already_fetched
|
|
||||||
puts "SHA256: #{download.sha256}"
|
|
||||||
|
|
||||||
formula.verify_download_integrity(download)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@ -207,7 +207,6 @@ module Homebrew
|
|||||||
end
|
end
|
||||||
|
|
||||||
if casks.any?
|
if casks.any?
|
||||||
|
|
||||||
if args.dry_run?
|
if args.dry_run?
|
||||||
if (casks_to_install = casks.reject(&:installed?).presence)
|
if (casks_to_install = casks.reject(&:installed?).presence)
|
||||||
ohai "Would install #{::Utils.pluralize("cask", casks_to_install.count, include_count: true)}:"
|
ohai "Would install #{::Utils.pluralize("cask", casks_to_install.count, include_count: true)}:"
|
||||||
|
|||||||
45
Library/Homebrew/download_queue.rb
Normal file
45
Library/Homebrew/download_queue.rb
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
# typed: true # rubocop:todo Sorbet/StrictSigil
|
||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
require "downloadable"
|
||||||
|
require "concurrent/promises"
|
||||||
|
require "concurrent/executors"
|
||||||
|
|
||||||
|
module Homebrew
|
||||||
|
class DownloadQueue
|
||||||
|
sig { returns(Concurrent::FixedThreadPool) }
|
||||||
|
attr_reader :pool
|
||||||
|
private :pool
|
||||||
|
|
||||||
|
sig { params(size: Integer).void }
|
||||||
|
def initialize(size = 1)
|
||||||
|
@pool = Concurrent::FixedThreadPool.new(size)
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { params(downloadable: Downloadable, force: T::Boolean).returns(Concurrent::Promises::Future) }
|
||||||
|
def enqueue(downloadable, force: false)
|
||||||
|
quiet = pool.max_length > 1
|
||||||
|
# Passing in arguments from outside into the future is a common `concurrent-ruby` pattern.
|
||||||
|
# rubocop:disable Lint/ShadowingOuterLocalVariable
|
||||||
|
Concurrent::Promises.future_on(pool, downloadable, force, quiet) do |downloadable, force, quiet|
|
||||||
|
downloadable.clear_cache if force
|
||||||
|
downloadable.fetch(quiet:)
|
||||||
|
end
|
||||||
|
# rubocop:enable Lint/ShadowingOuterLocalVariable
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { void }
|
||||||
|
def cancel
|
||||||
|
# FIXME: Implement graceful cancellaction of running downloads based on
|
||||||
|
# https://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent/Cancellation.html
|
||||||
|
# instead of killing the whole thread pool.
|
||||||
|
pool.kill
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { void }
|
||||||
|
def shutdown
|
||||||
|
pool.shutdown
|
||||||
|
pool.wait_for_termination
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -716,6 +716,10 @@ class LocalBottleDownloadStrategy < AbstractFileDownloadStrategy
|
|||||||
@cached_location = path
|
@cached_location = path
|
||||||
extend Pourable
|
extend Pourable
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def clear_cache
|
||||||
|
# Path is used directly and not cached.
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
# Strategy for downloading a Subversion repository.
|
# Strategy for downloading a Subversion repository.
|
||||||
|
|||||||
@ -5,19 +5,19 @@ require "url"
|
|||||||
require "checksum"
|
require "checksum"
|
||||||
require "download_strategy"
|
require "download_strategy"
|
||||||
|
|
||||||
class Downloadable
|
module Downloadable
|
||||||
include Context
|
include Context
|
||||||
extend T::Helpers
|
extend T::Helpers
|
||||||
|
|
||||||
abstract!
|
abstract!
|
||||||
|
|
||||||
sig { returns(T.nilable(URL)) }
|
sig { overridable.returns(T.nilable(URL)) }
|
||||||
attr_reader :url
|
attr_reader :url
|
||||||
|
|
||||||
sig { returns(T.nilable(Checksum)) }
|
sig { overridable.returns(T.nilable(Checksum)) }
|
||||||
attr_reader :checksum
|
attr_reader :checksum
|
||||||
|
|
||||||
sig { returns(T::Array[String]) }
|
sig { overridable.returns(T::Array[String]) }
|
||||||
attr_reader :mirrors
|
attr_reader :mirrors
|
||||||
|
|
||||||
sig { void }
|
sig { void }
|
||||||
@ -32,7 +32,7 @@ class Downloadable
|
|||||||
@version = @version.dup
|
@version = @version.dup
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { override.returns(T.self_type) }
|
sig { overridable.returns(T.self_type) }
|
||||||
def freeze
|
def freeze
|
||||||
@checksum.freeze
|
@checksum.freeze
|
||||||
@mirrors.freeze
|
@mirrors.freeze
|
||||||
@ -40,22 +40,30 @@ class Downloadable
|
|||||||
super
|
super
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { returns(T::Boolean) }
|
sig { abstract.returns(String) }
|
||||||
|
def name; end
|
||||||
|
|
||||||
|
sig { returns(String) }
|
||||||
|
def download_type
|
||||||
|
T.must(self.class.name&.split("::")&.last).gsub(/([[:lower:]])([[:upper:]])/, '\1 \2').downcase
|
||||||
|
end
|
||||||
|
|
||||||
|
sig(:final) { returns(T::Boolean) }
|
||||||
def downloaded?
|
def downloaded?
|
||||||
cached_download.exist?
|
cached_download.exist?
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { returns(Pathname) }
|
sig { overridable.returns(Pathname) }
|
||||||
def cached_download
|
def cached_download
|
||||||
downloader.cached_location
|
downloader.cached_location
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { void }
|
sig { overridable.void }
|
||||||
def clear_cache
|
def clear_cache
|
||||||
downloader.clear_cache
|
downloader.clear_cache
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { returns(T.nilable(Version)) }
|
sig { overridable.returns(T.nilable(Version)) }
|
||||||
def version
|
def version
|
||||||
return @version if @version && !@version.null?
|
return @version if @version && !@version.null?
|
||||||
|
|
||||||
@ -63,27 +71,34 @@ class Downloadable
|
|||||||
version unless version&.null?
|
version unless version&.null?
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { returns(T.class_of(AbstractDownloadStrategy)) }
|
sig { overridable.returns(T.class_of(AbstractDownloadStrategy)) }
|
||||||
def download_strategy
|
def download_strategy
|
||||||
@download_strategy ||= determine_url&.download_strategy
|
@download_strategy ||= determine_url&.download_strategy
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { returns(AbstractDownloadStrategy) }
|
sig { overridable.returns(AbstractDownloadStrategy) }
|
||||||
def downloader
|
def downloader
|
||||||
@downloader ||= begin
|
@downloader ||= begin
|
||||||
primary_url, *mirrors = determine_url_mirrors
|
primary_url, *mirrors = determine_url_mirrors
|
||||||
raise ArgumentError, "attempted to use a Downloadable without a URL!" if primary_url.blank?
|
raise ArgumentError, "attempted to use a `Downloadable` without a URL!" if primary_url.blank?
|
||||||
|
|
||||||
download_strategy.new(primary_url, download_name, version,
|
download_strategy.new(primary_url, download_name, version,
|
||||||
mirrors:, cache:, **T.must(@url).specs)
|
mirrors:, cache:, **T.must(@url).specs)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { params(verify_download_integrity: T::Boolean, timeout: T.nilable(T.any(Integer, Float))).returns(Pathname) }
|
sig {
|
||||||
def fetch(verify_download_integrity: true, timeout: nil)
|
overridable.params(
|
||||||
|
verify_download_integrity: T::Boolean,
|
||||||
|
timeout: T.nilable(T.any(Integer, Float)),
|
||||||
|
quiet: T::Boolean,
|
||||||
|
).returns(Pathname)
|
||||||
|
}
|
||||||
|
def fetch(verify_download_integrity: true, timeout: nil, quiet: false)
|
||||||
cache.mkpath
|
cache.mkpath
|
||||||
|
|
||||||
begin
|
begin
|
||||||
|
downloader.quiet! if quiet
|
||||||
downloader.fetch(timeout:)
|
downloader.fetch(timeout:)
|
||||||
rescue ErrorDuringExecution, CurlDownloadStrategyError => e
|
rescue ErrorDuringExecution, CurlDownloadStrategyError => e
|
||||||
raise DownloadError.new(self, e)
|
raise DownloadError.new(self, e)
|
||||||
@ -94,7 +109,7 @@ class Downloadable
|
|||||||
download
|
download
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { params(filename: Pathname).void }
|
sig { overridable.params(filename: Pathname).void }
|
||||||
def verify_download_integrity(filename)
|
def verify_download_integrity(filename)
|
||||||
if filename.file?
|
if filename.file?
|
||||||
ohai "Verifying checksum for '#{filename.basename}'" if verbose?
|
ohai "Verifying checksum for '#{filename.basename}'" if verbose?
|
||||||
@ -111,7 +126,7 @@ class Downloadable
|
|||||||
|
|
||||||
sig { overridable.returns(String) }
|
sig { overridable.returns(String) }
|
||||||
def download_name
|
def download_name
|
||||||
File.basename(determine_url.to_s)
|
@download_name ||= File.basename(determine_url.to_s)
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
private
|
||||||
|
|||||||
5
Library/Homebrew/downloadable.rbi
Normal file
5
Library/Homebrew/downloadable.rbi
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# typed: strict
|
||||||
|
|
||||||
|
module Downloadable
|
||||||
|
requires_ancestor { Kernel }
|
||||||
|
end
|
||||||
@ -567,7 +567,13 @@ class Formula
|
|||||||
params(name: String, klass: T.class_of(Resource), block: T.nilable(T.proc.bind(Resource).void))
|
params(name: String, klass: T.class_of(Resource), block: T.nilable(T.proc.bind(Resource).void))
|
||||||
.returns(T.nilable(Resource))
|
.returns(T.nilable(Resource))
|
||||||
}
|
}
|
||||||
def resource(name, klass = Resource, &block) = active_spec.resource(name, klass, &block)
|
def resource(name = T.unsafe(nil), klass = T.unsafe(nil), &block)
|
||||||
|
if klass.nil?
|
||||||
|
active_spec.resource(*name, &block)
|
||||||
|
else
|
||||||
|
active_spec.resource(name, klass, &block)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
# Old names for the formula.
|
# Old names for the formula.
|
||||||
#
|
#
|
||||||
@ -2765,11 +2771,20 @@ class Formula
|
|||||||
self.class.on_system_blocks_exist? || @on_system_blocks_exist
|
self.class.on_system_blocks_exist? || @on_system_blocks_exist
|
||||||
end
|
end
|
||||||
|
|
||||||
def fetch(verify_download_integrity: true)
|
sig {
|
||||||
active_spec.fetch(verify_download_integrity:)
|
params(
|
||||||
|
verify_download_integrity: T::Boolean,
|
||||||
|
timeout: T.nilable(T.any(Integer, Float)),
|
||||||
|
quiet: T::Boolean,
|
||||||
|
).returns(Pathname)
|
||||||
|
}
|
||||||
|
def fetch(verify_download_integrity: true, timeout: nil, quiet: false)
|
||||||
|
# odeprecated "Formula#fetch", "Resource#fetch on Formula#resource"
|
||||||
|
active_spec.fetch(verify_download_integrity:, timeout:, quiet:)
|
||||||
end
|
end
|
||||||
|
|
||||||
def verify_download_integrity(filename)
|
def verify_download_integrity(filename)
|
||||||
|
# odeprecated "Formula#verify_download_integrity", "Resource#verify_download_integrity on Formula#resource"
|
||||||
active_spec.verify_download_integrity(filename)
|
active_spec.verify_download_integrity(filename)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|||||||
@ -1254,18 +1254,18 @@ on_request: installed_on_request?, options:)
|
|||||||
formula.fetch_patches
|
formula.fetch_patches
|
||||||
formula.resources.each(&:fetch)
|
formula.resources.each(&:fetch)
|
||||||
end
|
end
|
||||||
downloader.fetch
|
downloadable.downloader.fetch
|
||||||
|
|
||||||
self.class.fetched << formula
|
self.class.fetched << formula
|
||||||
end
|
end
|
||||||
|
|
||||||
def downloader
|
def downloadable
|
||||||
if (bottle_path = formula.local_bottle_path)
|
if (bottle_path = formula.local_bottle_path)
|
||||||
LocalBottleDownloadStrategy.new(bottle_path)
|
Resource::Local.new(bottle_path)
|
||||||
elsif pour_bottle?
|
elsif pour_bottle?
|
||||||
formula.bottle
|
formula.bottle
|
||||||
else
|
else
|
||||||
formula
|
formula.resource
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -1324,7 +1324,7 @@ on_request: installed_on_request?, options:)
|
|||||||
end
|
end
|
||||||
|
|
||||||
HOMEBREW_CELLAR.cd do
|
HOMEBREW_CELLAR.cd do
|
||||||
downloader.stage
|
downloadable.downloader.stage
|
||||||
end
|
end
|
||||||
|
|
||||||
Tab.clear_cache
|
Tab.clear_cache
|
||||||
|
|||||||
@ -106,7 +106,7 @@ class ExternalPatch
|
|||||||
|
|
||||||
def initialize(strip, &block)
|
def initialize(strip, &block)
|
||||||
@strip = strip
|
@strip = strip
|
||||||
@resource = Resource::PatchResource.new(&block)
|
@resource = Resource::Patch.new(&block)
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { returns(T::Boolean) }
|
sig { returns(T::Boolean) }
|
||||||
|
|||||||
@ -9,7 +9,8 @@ require "extend/on_system"
|
|||||||
# Resource is the fundamental representation of an external resource. The
|
# Resource is the fundamental representation of an external resource. The
|
||||||
# primary formula download, along with other declared resources, are instances
|
# primary formula download, along with other declared resources, are instances
|
||||||
# of this class.
|
# of this class.
|
||||||
class Resource < Downloadable
|
class Resource
|
||||||
|
include Downloadable
|
||||||
include FileUtils
|
include FileUtils
|
||||||
include OnSystem::MacOSAndLinux
|
include OnSystem::MacOSAndLinux
|
||||||
|
|
||||||
@ -140,7 +141,15 @@ class Resource < Downloadable
|
|||||||
Partial.new(self, files)
|
Partial.new(self, files)
|
||||||
end
|
end
|
||||||
|
|
||||||
def fetch(verify_download_integrity: true)
|
sig {
|
||||||
|
override
|
||||||
|
.params(
|
||||||
|
verify_download_integrity: T::Boolean,
|
||||||
|
timeout: T.nilable(T.any(Integer, Float)),
|
||||||
|
quiet: T::Boolean,
|
||||||
|
).returns(Pathname)
|
||||||
|
}
|
||||||
|
def fetch(verify_download_integrity: true, timeout: nil, quiet: false)
|
||||||
fetch_patches
|
fetch_patches
|
||||||
|
|
||||||
super
|
super
|
||||||
@ -194,7 +203,7 @@ class Resource < Downloadable
|
|||||||
@download_strategy = @url.download_strategy
|
@download_strategy = @url.download_strategy
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { params(val: T.nilable(T.any(String, Version))).returns(T.nilable(Version)) }
|
sig { override.params(val: T.nilable(T.any(String, Version))).returns(T.nilable(Version)) }
|
||||||
def version(val = nil)
|
def version(val = nil)
|
||||||
return super() if val.nil?
|
return super() if val.nil?
|
||||||
|
|
||||||
@ -211,7 +220,7 @@ class Resource < Downloadable
|
|||||||
end
|
end
|
||||||
|
|
||||||
def patch(strip = :p1, src = nil, &block)
|
def patch(strip = :p1, src = nil, &block)
|
||||||
p = Patch.create(strip, src, &block)
|
p = ::Patch.create(strip, src, &block)
|
||||||
patches << p
|
patches << p
|
||||||
end
|
end
|
||||||
|
|
||||||
@ -260,6 +269,27 @@ class Resource < Downloadable
|
|||||||
[*extra_urls, *super].uniq
|
[*extra_urls, *super].uniq
|
||||||
end
|
end
|
||||||
|
|
||||||
|
# A local resource that doesn't need to be downloaded.
|
||||||
|
class Local < Resource
|
||||||
|
def initialize(path)
|
||||||
|
super(File.basename(path))
|
||||||
|
@downloader = LocalBottleDownloadStrategy.new(path)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# A resource for a formula.
|
||||||
|
class Formula < Resource
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def name
|
||||||
|
T.must(owner).name
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def download_name
|
||||||
|
name
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
# A resource containing a Go package.
|
# A resource containing a Go package.
|
||||||
class Go < Resource
|
class Go < Resource
|
||||||
def stage(target, &block)
|
def stage(target, &block)
|
||||||
@ -320,7 +350,7 @@ class Resource < Downloadable
|
|||||||
end
|
end
|
||||||
|
|
||||||
# A resource containing a patch.
|
# A resource containing a patch.
|
||||||
class PatchResource < Resource
|
class Patch < Resource
|
||||||
attr_reader :patch_files
|
attr_reader :patch_files
|
||||||
|
|
||||||
def initialize(&block)
|
def initialize(&block)
|
||||||
|
|||||||
96
Library/Homebrew/retryable_download.rb
Normal file
96
Library/Homebrew/retryable_download.rb
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
# typed: true # rubocop:todo Sorbet/StrictSigil
|
||||||
|
# frozen_string_literal: true
|
||||||
|
|
||||||
|
module Homebrew
|
||||||
|
class RetryableDownload
|
||||||
|
include Downloadable
|
||||||
|
|
||||||
|
sig { returns(Downloadable) }
|
||||||
|
attr_reader :downloadable
|
||||||
|
private :downloadable
|
||||||
|
|
||||||
|
sig { override.returns(T.nilable(URL)) }
|
||||||
|
def url = downloadable.url
|
||||||
|
|
||||||
|
sig { override.returns(T.nilable(Checksum)) }
|
||||||
|
def checksum = downloadable.checksum
|
||||||
|
|
||||||
|
sig { override.returns(T::Array[String]) }
|
||||||
|
def mirrors = downloadable.mirrors
|
||||||
|
|
||||||
|
sig { params(downloadable: Downloadable, tries: Integer).void }
|
||||||
|
def initialize(downloadable, tries: 3)
|
||||||
|
super()
|
||||||
|
|
||||||
|
@downloadable = downloadable
|
||||||
|
@try = 0
|
||||||
|
@tries = tries
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def name = downloadable.name
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def download_type = downloadable.download_type
|
||||||
|
|
||||||
|
sig { override.returns(Pathname) }
|
||||||
|
def cached_download = downloadable.cached_download
|
||||||
|
|
||||||
|
sig { override.void }
|
||||||
|
def clear_cache = downloadable.clear_cache
|
||||||
|
|
||||||
|
sig { override.returns(T.nilable(Version)) }
|
||||||
|
def version = downloadable.version
|
||||||
|
|
||||||
|
sig { override.returns(T.class_of(AbstractDownloadStrategy)) }
|
||||||
|
def download_strategy = downloadable.download_strategy
|
||||||
|
|
||||||
|
sig { override.returns(AbstractDownloadStrategy) }
|
||||||
|
def downloader = downloadable.downloader
|
||||||
|
|
||||||
|
sig {
|
||||||
|
override.params(
|
||||||
|
verify_download_integrity: T::Boolean,
|
||||||
|
timeout: T.nilable(T.any(Integer, Float)),
|
||||||
|
quiet: T::Boolean,
|
||||||
|
).returns(Pathname)
|
||||||
|
}
|
||||||
|
def fetch(verify_download_integrity: true, timeout: nil, quiet: false)
|
||||||
|
@try += 1
|
||||||
|
|
||||||
|
already_downloaded = downloadable.downloaded?
|
||||||
|
|
||||||
|
download = downloadable.fetch(verify_download_integrity: false, timeout:, quiet:)
|
||||||
|
|
||||||
|
return download unless download.file?
|
||||||
|
|
||||||
|
unless quiet
|
||||||
|
puts "Downloaded to: #{download}" unless already_downloaded
|
||||||
|
puts "SHA256: #{download.sha256}"
|
||||||
|
end
|
||||||
|
|
||||||
|
downloadable.verify_download_integrity(download) if verify_download_integrity
|
||||||
|
|
||||||
|
download
|
||||||
|
rescue DownloadError, ChecksumMismatchError
|
||||||
|
tries_remaining = @tries - @try
|
||||||
|
raise if tries_remaining.zero?
|
||||||
|
|
||||||
|
wait = 2 ** @try
|
||||||
|
unless quiet
|
||||||
|
what = Utils.pluralize("tr", tries_remaining, plural: "ies", singular: "y")
|
||||||
|
ohai "Retrying download in #{wait}s... (#{tries_remaining} #{what} left)"
|
||||||
|
end
|
||||||
|
sleep wait
|
||||||
|
|
||||||
|
downloadable.clear_cache
|
||||||
|
retry
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { override.params(filename: Pathname).void }
|
||||||
|
def verify_download_integrity(filename) = downloadable.verify_download_integrity(filename)
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def download_name = downloadable.download_name
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -16,6 +16,8 @@ require "macos_version"
|
|||||||
require "extend/on_system"
|
require "extend/on_system"
|
||||||
|
|
||||||
class SoftwareSpec
|
class SoftwareSpec
|
||||||
|
include Downloadable
|
||||||
|
|
||||||
extend Forwardable
|
extend Forwardable
|
||||||
include OnSystem::MacOSAndLinux
|
include OnSystem::MacOSAndLinux
|
||||||
|
|
||||||
@ -34,8 +36,10 @@ class SoftwareSpec
|
|||||||
def_delegators :@resource, :sha256
|
def_delegators :@resource, :sha256
|
||||||
|
|
||||||
def initialize(flags: [])
|
def initialize(flags: [])
|
||||||
|
super()
|
||||||
|
|
||||||
# Ensure this is synced with `initialize_dup` and `freeze` (excluding simple objects like integers and booleans)
|
# Ensure this is synced with `initialize_dup` and `freeze` (excluding simple objects like integers and booleans)
|
||||||
@resource = Resource.new
|
@resource = Resource::Formula.new
|
||||||
@resources = {}
|
@resources = {}
|
||||||
@dependency_collector = DependencyCollector.new
|
@dependency_collector = DependencyCollector.new
|
||||||
@bottle_specification = BottleSpecification.new
|
@bottle_specification = BottleSpecification.new
|
||||||
@ -78,6 +82,11 @@ class SoftwareSpec
|
|||||||
super
|
super
|
||||||
end
|
end
|
||||||
|
|
||||||
|
sig { override.returns(String) }
|
||||||
|
def download_type
|
||||||
|
"formula"
|
||||||
|
end
|
||||||
|
|
||||||
def owner=(owner)
|
def owner=(owner)
|
||||||
@name = owner.name
|
@name = owner.name
|
||||||
@full_name = owner.full_name
|
@full_name = owner.full_name
|
||||||
@ -126,8 +135,9 @@ class SoftwareSpec
|
|||||||
params(name: String, klass: T.class_of(Resource), block: T.nilable(T.proc.bind(Resource).void))
|
params(name: String, klass: T.class_of(Resource), block: T.nilable(T.proc.bind(Resource).void))
|
||||||
.returns(T.nilable(Resource))
|
.returns(T.nilable(Resource))
|
||||||
}
|
}
|
||||||
def resource(name, klass = Resource, &block)
|
def resource(name = T.unsafe(nil), klass = Resource, &block)
|
||||||
if block
|
if block
|
||||||
|
raise ArgumentError, "Resource must have a name." if name.nil?
|
||||||
raise DuplicateResourceError, name if resource_defined?(name)
|
raise DuplicateResourceError, name if resource_defined?(name)
|
||||||
|
|
||||||
res = klass.new(name, &block)
|
res = klass.new(name, &block)
|
||||||
@ -137,6 +147,8 @@ class SoftwareSpec
|
|||||||
dependency_collector.add(res)
|
dependency_collector.add(res)
|
||||||
res
|
res
|
||||||
else
|
else
|
||||||
|
return @resource if name.nil?
|
||||||
|
|
||||||
resources.fetch(name) { raise ResourceMissingError.new(owner, name) }
|
resources.fetch(name) { raise ResourceMissingError.new(owner, name) }
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@ -285,6 +297,8 @@ class HeadSoftwareSpec < SoftwareSpec
|
|||||||
end
|
end
|
||||||
|
|
||||||
class Bottle
|
class Bottle
|
||||||
|
include Downloadable
|
||||||
|
|
||||||
class Filename
|
class Filename
|
||||||
attr_reader :name, :version, :tag, :rebuild
|
attr_reader :name, :version, :tag, :rebuild
|
||||||
|
|
||||||
@ -338,9 +352,11 @@ class Bottle
|
|||||||
attr_reader :name, :resource, :tag, :cellar, :rebuild
|
attr_reader :name, :resource, :tag, :cellar, :rebuild
|
||||||
|
|
||||||
def_delegators :resource, :url, :verify_download_integrity
|
def_delegators :resource, :url, :verify_download_integrity
|
||||||
def_delegators :resource, :cached_download
|
def_delegators :resource, :cached_download, :downloader
|
||||||
|
|
||||||
def initialize(formula, spec, tag = nil)
|
def initialize(formula, spec, tag = nil)
|
||||||
|
super()
|
||||||
|
|
||||||
@name = formula.name
|
@name = formula.name
|
||||||
@resource = Resource.new
|
@resource = Resource.new
|
||||||
@resource.owner = formula
|
@resource.owner = formula
|
||||||
@ -360,8 +376,15 @@ class Bottle
|
|||||||
root_url(spec.root_url, spec.root_url_specs)
|
root_url(spec.root_url, spec.root_url_specs)
|
||||||
end
|
end
|
||||||
|
|
||||||
def fetch(verify_download_integrity: true)
|
sig {
|
||||||
@resource.fetch(verify_download_integrity:)
|
override.params(
|
||||||
|
verify_download_integrity: T::Boolean,
|
||||||
|
timeout: T.nilable(T.any(Integer, Float)),
|
||||||
|
quiet: T.nilable(T::Boolean),
|
||||||
|
).returns(Pathname)
|
||||||
|
}
|
||||||
|
def fetch(verify_download_integrity: true, timeout: nil, quiet: false)
|
||||||
|
resource.fetch(verify_download_integrity:, timeout:, quiet:)
|
||||||
rescue DownloadError
|
rescue DownloadError
|
||||||
raise unless fallback_on_error
|
raise unless fallback_on_error
|
||||||
|
|
||||||
@ -369,6 +392,7 @@ class Bottle
|
|||||||
retry
|
retry
|
||||||
end
|
end
|
||||||
|
|
||||||
|
sig { override.void }
|
||||||
def clear_cache
|
def clear_cache
|
||||||
@resource.clear_cache
|
@resource.clear_cache
|
||||||
github_packages_manifest_resource&.clear_cache
|
github_packages_manifest_resource&.clear_cache
|
||||||
@ -384,14 +408,13 @@ class Bottle
|
|||||||
@spec.skip_relocation?(tag: @tag)
|
@spec.skip_relocation?(tag: @tag)
|
||||||
end
|
end
|
||||||
|
|
||||||
def stage
|
def stage = downloader.stage
|
||||||
resource.downloader.stage
|
|
||||||
end
|
|
||||||
|
|
||||||
def fetch_tab
|
def fetch_tab(timeout: nil, quiet: false)
|
||||||
return if github_packages_manifest_resource.blank?
|
return unless (resource = github_packages_manifest_resource)
|
||||||
|
|
||||||
github_packages_manifest_resource.fetch
|
begin
|
||||||
|
resource.fetch(timeout:, quiet:)
|
||||||
rescue DownloadError
|
rescue DownloadError
|
||||||
raise unless fallback_on_error
|
raise unless fallback_on_error
|
||||||
|
|
||||||
@ -400,14 +423,17 @@ class Bottle
|
|||||||
raise if @fetch_tab_retried
|
raise if @fetch_tab_retried
|
||||||
|
|
||||||
@fetch_tab_retried = true
|
@fetch_tab_retried = true
|
||||||
github_packages_manifest_resource.clear_cache
|
resource.clear_cache
|
||||||
retry
|
retry
|
||||||
end
|
end
|
||||||
|
end
|
||||||
|
|
||||||
def tab_attributes
|
def tab_attributes
|
||||||
return {} unless github_packages_manifest_resource&.downloaded?
|
if (resource = github_packages_manifest_resource) && resource.downloaded?
|
||||||
|
return resource.tab
|
||||||
|
end
|
||||||
|
|
||||||
github_packages_manifest_resource.tab
|
{}
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { returns(Filename) }
|
sig { returns(Filename) }
|
||||||
@ -415,8 +441,7 @@ class Bottle
|
|||||||
Filename.create(resource.owner, @tag, @spec.rebuild)
|
Filename.create(resource.owner, @tag, @spec.rebuild)
|
||||||
end
|
end
|
||||||
|
|
||||||
private
|
sig { returns(T.nilable(Resource::BottleManifest)) }
|
||||||
|
|
||||||
def github_packages_manifest_resource
|
def github_packages_manifest_resource
|
||||||
return if @resource.download_strategy != CurlGitHubPackagesDownloadStrategy
|
return if @resource.download_strategy != CurlGitHubPackagesDownloadStrategy
|
||||||
|
|
||||||
@ -439,6 +464,8 @@ class Bottle
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
def select_download_strategy(specs)
|
def select_download_strategy(specs)
|
||||||
specs[:using] ||= DownloadStrategyDetector.detect(@root_url)
|
specs[:using] ||= DownloadStrategyDetector.detect(@root_url)
|
||||||
specs[:bottle] = true
|
specs[:bottle] = true
|
||||||
|
|||||||
@ -32,6 +32,9 @@ class Homebrew::Cmd::FetchCmd::Args < Homebrew::CLI::Args
|
|||||||
sig { returns(T::Boolean) }
|
sig { returns(T::Boolean) }
|
||||||
def casks?; end
|
def casks?; end
|
||||||
|
|
||||||
|
sig { returns(T.nilable(String)) }
|
||||||
|
def concurrency; end
|
||||||
|
|
||||||
sig { returns(T::Boolean) }
|
sig { returns(T::Boolean) }
|
||||||
def deps?; end
|
def deps?; end
|
||||||
|
|
||||||
|
|||||||
11645
Library/Homebrew/sorbet/rbi/gems/concurrent-ruby@1.3.4.rbi
generated
Normal file
11645
Library/Homebrew/sorbet/rbi/gems/concurrent-ruby@1.3.4.rbi
generated
Normal file
File diff suppressed because it is too large
Load Diff
@ -55,7 +55,7 @@ RSpec.describe Patch do
|
|||||||
subject(:patch) { described_class.create(:p2, nil) }
|
subject(:patch) { described_class.create(:p2, nil) }
|
||||||
|
|
||||||
context "when the patch is empty" do
|
context "when the patch is empty" do
|
||||||
it(:resource) { expect(patch.resource).to be_a Resource::PatchResource }
|
it(:resource) { expect(patch.resource).to be_a Resource::Patch }
|
||||||
it { expect(patch.patch_files).to eq(patch.resource.patch_files) }
|
it { expect(patch.patch_files).to eq(patch.resource.patch_files) }
|
||||||
it { expect(patch.patch_files).to eq([]) }
|
it { expect(patch.patch_files).to eq([]) }
|
||||||
end
|
end
|
||||||
|
|||||||
@ -51,14 +51,54 @@ module Tty
|
|||||||
string.gsub(/\033\[\d+(;\d+)*m/, "")
|
string.gsub(/\033\[\d+(;\d+)*m/, "")
|
||||||
end
|
end
|
||||||
|
|
||||||
|
sig { params(line_count: Integer).returns(String) }
|
||||||
|
def move_cursor_up(line_count)
|
||||||
|
"\033[#{line_count}A"
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { params(line_count: Integer).returns(String) }
|
||||||
|
def move_cursor_up_beginning(line_count)
|
||||||
|
"\033[#{line_count}F"
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { params(line_count: Integer).returns(String) }
|
||||||
|
def move_cursor_down(line_count)
|
||||||
|
"\033[#{line_count}B"
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { returns(String) }
|
||||||
|
def clear_to_end
|
||||||
|
"\033[K"
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { returns(String) }
|
||||||
|
def hide_cursor
|
||||||
|
"\033[?25l"
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { returns(String) }
|
||||||
|
def show_cursor
|
||||||
|
"\033[?25h"
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { returns(T.nilable([Integer, Integer])) }
|
||||||
|
def size
|
||||||
|
return @size if defined?(@size)
|
||||||
|
|
||||||
|
height, width = `/bin/stty size 2>/dev/null`.presence&.split&.map(&:to_i)
|
||||||
|
return if height.nil? || width.nil?
|
||||||
|
|
||||||
|
@size = [height, width]
|
||||||
|
end
|
||||||
|
|
||||||
|
sig { returns(Integer) }
|
||||||
|
def height
|
||||||
|
@height ||= size&.first || `/usr/bin/tput lines 2>/dev/null`.presence&.to_i || 40
|
||||||
|
end
|
||||||
|
|
||||||
sig { returns(Integer) }
|
sig { returns(Integer) }
|
||||||
def width
|
def width
|
||||||
@width ||= begin
|
@width ||= size&.second || `/usr/bin/tput cols 2>/dev/null`.presence&.to_i || 80
|
||||||
_, width = `/bin/stty size 2>/dev/null`.split
|
|
||||||
width, = `/usr/bin/tput cols 2>/dev/null`.split if width.to_i.zero?
|
|
||||||
width ||= 80
|
|
||||||
width.to_i
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
|
|
||||||
sig { params(string: String).returns(String) }
|
sig { params(string: String).returns(String) }
|
||||||
|
|||||||
@ -43,6 +43,7 @@ $:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version
|
|||||||
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/coderay-1.1.3/lib")
|
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/coderay-1.1.3/lib")
|
||||||
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/highline-3.0.1/lib")
|
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/highline-3.0.1/lib")
|
||||||
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/commander-5.0.0/lib")
|
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/commander-5.0.0/lib")
|
||||||
|
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/concurrent-ruby-1.3.4/lib/concurrent-ruby")
|
||||||
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/diff-lcs-1.5.1/lib")
|
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/diff-lcs-1.5.1/lib")
|
||||||
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/docile-1.4.1/lib")
|
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/docile-1.4.1/lib")
|
||||||
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/elftools-1.3.1/lib")
|
$:.unshift File.expand_path("#{__dir__}/../#{RUBY_ENGINE}/#{Gem.ruby_api_version}/gems/elftools-1.3.1/lib")
|
||||||
|
|||||||
21
Library/Homebrew/vendor/bundle/ruby/3.3.0/gems/concurrent-ruby-1.3.4/LICENSE.txt
vendored
Normal file
21
Library/Homebrew/vendor/bundle/ruby/3.3.0/gems/concurrent-ruby-1.3.4/LICENSE.txt
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
Copyright (c) Jerry D'Antonio -- released under the MIT license.
|
||||||
|
|
||||||
|
http://www.opensource.org/licenses/mit-license.php
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
@ -0,0 +1,5 @@
|
|||||||
|
# This file is here so that there is a file with the same name as the gem that
|
||||||
|
# can be required by Bundler.require. Applications should normally
|
||||||
|
# require 'concurrent'.
|
||||||
|
|
||||||
|
require_relative "concurrent"
|
||||||
@ -0,0 +1,134 @@
|
|||||||
|
require 'concurrent/version'
|
||||||
|
require 'concurrent/constants'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/configuration'
|
||||||
|
|
||||||
|
require 'concurrent/atomics'
|
||||||
|
require 'concurrent/executors'
|
||||||
|
require 'concurrent/synchronization'
|
||||||
|
|
||||||
|
require 'concurrent/atomic/atomic_markable_reference'
|
||||||
|
require 'concurrent/atomic/atomic_reference'
|
||||||
|
require 'concurrent/agent'
|
||||||
|
require 'concurrent/atom'
|
||||||
|
require 'concurrent/array'
|
||||||
|
require 'concurrent/hash'
|
||||||
|
require 'concurrent/set'
|
||||||
|
require 'concurrent/map'
|
||||||
|
require 'concurrent/tuple'
|
||||||
|
require 'concurrent/async'
|
||||||
|
require 'concurrent/dataflow'
|
||||||
|
require 'concurrent/delay'
|
||||||
|
require 'concurrent/exchanger'
|
||||||
|
require 'concurrent/future'
|
||||||
|
require 'concurrent/immutable_struct'
|
||||||
|
require 'concurrent/ivar'
|
||||||
|
require 'concurrent/maybe'
|
||||||
|
require 'concurrent/mutable_struct'
|
||||||
|
require 'concurrent/mvar'
|
||||||
|
require 'concurrent/promise'
|
||||||
|
require 'concurrent/scheduled_task'
|
||||||
|
require 'concurrent/settable_struct'
|
||||||
|
require 'concurrent/timer_task'
|
||||||
|
require 'concurrent/tvar'
|
||||||
|
require 'concurrent/promises'
|
||||||
|
|
||||||
|
require 'concurrent/thread_safe/synchronized_delegator'
|
||||||
|
require 'concurrent/thread_safe/util'
|
||||||
|
|
||||||
|
require 'concurrent/options'
|
||||||
|
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
#
|
||||||
|
# @note **Private Implementation:** This abstraction is a private, internal
|
||||||
|
# implementation detail. It should never be used directly.
|
||||||
|
|
||||||
|
# @!macro monotonic_clock_warning
|
||||||
|
#
|
||||||
|
# @note Time calculations on all platforms and languages are sensitive to
|
||||||
|
# changes to the system clock. To alleviate the potential problems
|
||||||
|
# associated with changing the system clock while an application is running,
|
||||||
|
# most modern operating systems provide a monotonic clock that operates
|
||||||
|
# independently of the system clock. A monotonic clock cannot be used to
|
||||||
|
# determine human-friendly clock times. A monotonic clock is used exclusively
|
||||||
|
# for calculating time intervals. Not all Ruby platforms provide access to an
|
||||||
|
# operating system monotonic clock. On these platforms a pure-Ruby monotonic
|
||||||
|
# clock will be used as a fallback. An operating system monotonic clock is both
|
||||||
|
# faster and more reliable than the pure-Ruby implementation. The pure-Ruby
|
||||||
|
# implementation should be fast and reliable enough for most non-realtime
|
||||||
|
# operations. At this time the common Ruby platforms that provide access to an
|
||||||
|
# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions).
|
||||||
|
#
|
||||||
|
# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3)
|
||||||
|
|
||||||
|
# @!macro copy_options
|
||||||
|
#
|
||||||
|
# ## Copy Options
|
||||||
|
#
|
||||||
|
# Object references in Ruby are mutable. This can lead to serious
|
||||||
|
# problems when the {#value} of an object is a mutable reference. Which
|
||||||
|
# is always the case unless the value is a `Fixnum`, `Symbol`, or similar
|
||||||
|
# "primitive" data type. Each instance can be configured with a few
|
||||||
|
# options that can help protect the program from potentially dangerous
|
||||||
|
# operations. Each of these options can be optionally set when the object
|
||||||
|
# instance is created:
|
||||||
|
#
|
||||||
|
# * `:dup_on_deref` When true the object will call the `#dup` method on
|
||||||
|
# the `value` object every time the `#value` method is called
|
||||||
|
# (default: false)
|
||||||
|
# * `:freeze_on_deref` When true the object will call the `#freeze`
|
||||||
|
# method on the `value` object every time the `#value` method is called
|
||||||
|
# (default: false)
|
||||||
|
# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run
|
||||||
|
# every time the `#value` method is called. The `Proc` will be given
|
||||||
|
# the current `value` as its only argument and the result returned by
|
||||||
|
# the block will be the return value of the `#value` call. When `nil`
|
||||||
|
# this option will be ignored (default: nil)
|
||||||
|
#
|
||||||
|
# When multiple deref options are set the order of operations is strictly defined.
|
||||||
|
# The order of deref operations is:
|
||||||
|
# * `:copy_on_deref`
|
||||||
|
# * `:dup_on_deref`
|
||||||
|
# * `:freeze_on_deref`
|
||||||
|
#
|
||||||
|
# Because of this ordering there is no need to `#freeze` an object created by a
|
||||||
|
# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`.
|
||||||
|
# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is
|
||||||
|
# as close to the behavior of a "pure" functional language (like Erlang, Clojure,
|
||||||
|
# or Haskell) as we are likely to get in Ruby.
|
||||||
|
|
||||||
|
# @!macro deref_options
|
||||||
|
#
|
||||||
|
# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before
|
||||||
|
# returning the data from {#value}
|
||||||
|
# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before
|
||||||
|
# returning the data from {#value}
|
||||||
|
# @option opts [Proc] :copy_on_deref (nil) When calling the {#value}
|
||||||
|
# method, call the given proc passing the internal value as the sole
|
||||||
|
# argument then return the new value returned from the proc.
|
||||||
|
|
||||||
|
# @!macro executor_and_deref_options
|
||||||
|
#
|
||||||
|
# @param [Hash] opts the options used to define the behavior at update and deref
|
||||||
|
# and to specify the executor on which to perform actions
|
||||||
|
# @option opts [Executor] :executor when set use the given `Executor` instance.
|
||||||
|
# Three special values are also supported: `:io` returns the global pool for
|
||||||
|
# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast
|
||||||
|
# operations, and `:immediate` returns the global `ImmediateExecutor` object.
|
||||||
|
# @!macro deref_options
|
||||||
|
|
||||||
|
# @!macro warn.edge
|
||||||
|
# @api Edge
|
||||||
|
# @note **Edge Features** are under active development and may change frequently.
|
||||||
|
#
|
||||||
|
# - Deprecations are not added before incompatible changes.
|
||||||
|
# - Edge version: _major_ is always 0, _minor_ bump means incompatible change,
|
||||||
|
# _patch_ bump means compatible change.
|
||||||
|
# - Edge features may also lack tests and documentation.
|
||||||
|
# - Features developed in `concurrent-ruby-edge` are expected to move
|
||||||
|
# to `concurrent-ruby` when finalised.
|
||||||
|
|
||||||
|
|
||||||
|
# {include:file:README.md}
|
||||||
|
module Concurrent
|
||||||
|
end
|
||||||
@ -0,0 +1,588 @@
|
|||||||
|
require 'concurrent/configuration'
|
||||||
|
require 'concurrent/atomic/atomic_reference'
|
||||||
|
require 'concurrent/atomic/count_down_latch'
|
||||||
|
require 'concurrent/atomic/thread_local_var'
|
||||||
|
require 'concurrent/collection/copy_on_write_observer_set'
|
||||||
|
require 'concurrent/concern/observable'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# `Agent` is inspired by Clojure's [agent](http://clojure.org/agents)
|
||||||
|
# function. An agent is a shared, mutable variable providing independent,
|
||||||
|
# uncoordinated, *asynchronous* change of individual values. Best used when
|
||||||
|
# the value will undergo frequent, complex updates. Suitable when the result
|
||||||
|
# of an update does not need to be known immediately. `Agent` is (mostly)
|
||||||
|
# functionally equivalent to Clojure's agent, except where the runtime
|
||||||
|
# prevents parity.
|
||||||
|
#
|
||||||
|
# Agents are reactive, not autonomous - there is no imperative message loop
|
||||||
|
# and no blocking receive. The state of an Agent should be itself immutable
|
||||||
|
# and the `#value` of an Agent is always immediately available for reading by
|
||||||
|
# any thread without any messages, i.e. observation does not require
|
||||||
|
# cooperation or coordination.
|
||||||
|
#
|
||||||
|
# Agent action dispatches are made using the various `#send` methods. These
|
||||||
|
# methods always return immediately. At some point later, in another thread,
|
||||||
|
# the following will happen:
|
||||||
|
#
|
||||||
|
# 1. The given `action` will be applied to the state of the Agent and the
|
||||||
|
# `args`, if any were supplied.
|
||||||
|
# 2. The return value of `action` will be passed to the validator lambda,
|
||||||
|
# if one has been set on the Agent.
|
||||||
|
# 3. If the validator succeeds or if no validator was given, the return value
|
||||||
|
# of the given `action` will become the new `#value` of the Agent. See
|
||||||
|
# `#initialize` for details.
|
||||||
|
# 4. If any observers were added to the Agent, they will be notified. See
|
||||||
|
# `#add_observer` for details.
|
||||||
|
# 5. If during the `action` execution any other dispatches are made (directly
|
||||||
|
# or indirectly), they will be held until after the `#value` of the Agent
|
||||||
|
# has been changed.
|
||||||
|
#
|
||||||
|
# If any exceptions are thrown by an action function, no nested dispatches
|
||||||
|
# will occur, and the exception will be cached in the Agent itself. When an
|
||||||
|
# Agent has errors cached, any subsequent interactions will immediately throw
|
||||||
|
# an exception, until the agent's errors are cleared. Agent errors can be
|
||||||
|
# examined with `#error` and the agent restarted with `#restart`.
|
||||||
|
#
|
||||||
|
# The actions of all Agents get interleaved amongst threads in a thread pool.
|
||||||
|
# At any point in time, at most one action for each Agent is being executed.
|
||||||
|
# Actions dispatched to an agent from another single agent or thread will
|
||||||
|
# occur in the order they were sent, potentially interleaved with actions
|
||||||
|
# dispatched to the same agent from other sources. The `#send` method should
|
||||||
|
# be used for actions that are CPU limited, while the `#send_off` method is
|
||||||
|
# appropriate for actions that may block on IO.
|
||||||
|
#
|
||||||
|
# Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions.
|
||||||
|
#
|
||||||
|
# ## Example
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# def next_fibonacci(set = nil)
|
||||||
|
# return [0, 1] if set.nil?
|
||||||
|
# set + [set[-2..-1].reduce{|sum,x| sum + x }]
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# # create an agent with an initial value
|
||||||
|
# agent = Concurrent::Agent.new(next_fibonacci)
|
||||||
|
#
|
||||||
|
# # send a few update requests
|
||||||
|
# 5.times do
|
||||||
|
# agent.send{|set| next_fibonacci(set) }
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# # wait for them to complete
|
||||||
|
# agent.await
|
||||||
|
#
|
||||||
|
# # get the current value
|
||||||
|
# agent.value #=> [0, 1, 1, 2, 3, 5, 8]
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# ## Observation
|
||||||
|
#
|
||||||
|
# Agents support observers through the {Concurrent::Observable} mixin module.
|
||||||
|
# Notification of observers occurs every time an action dispatch returns and
|
||||||
|
# the new value is successfully validated. Observation will *not* occur if the
|
||||||
|
# action raises an exception, if validation fails, or when a {#restart} occurs.
|
||||||
|
#
|
||||||
|
# When notified the observer will receive three arguments: `time`, `old_value`,
|
||||||
|
# and `new_value`. The `time` argument is the time at which the value change
|
||||||
|
# occurred. The `old_value` is the value of the Agent when the action began
|
||||||
|
# processing. The `new_value` is the value to which the Agent was set when the
|
||||||
|
# action completed. Note that `old_value` and `new_value` may be the same.
|
||||||
|
# This is not an error. It simply means that the action returned the same
|
||||||
|
# value.
|
||||||
|
#
|
||||||
|
# ## Nested Actions
|
||||||
|
#
|
||||||
|
# It is possible for an Agent action to post further actions back to itself.
|
||||||
|
# The nested actions will be enqueued normally then processed *after* the
|
||||||
|
# outer action completes, in the order they were sent, possibly interleaved
|
||||||
|
# with action dispatches from other threads. Nested actions never deadlock
|
||||||
|
# with one another and a failure in a nested action will never affect the
|
||||||
|
# outer action.
|
||||||
|
#
|
||||||
|
# Nested actions can be called using the Agent reference from the enclosing
|
||||||
|
# scope or by passing the reference in as a "send" argument. Nested actions
|
||||||
|
# cannot be post using `self` from within the action block/proc/lambda; `self`
|
||||||
|
# in this context will not reference the Agent. The preferred method for
|
||||||
|
# dispatching nested actions is to pass the Agent as an argument. This allows
|
||||||
|
# Ruby to more effectively manage the closing scope.
|
||||||
|
#
|
||||||
|
# Prefer this:
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# agent = Concurrent::Agent.new(0)
|
||||||
|
# agent.send(agent) do |value, this|
|
||||||
|
# this.send {|v| v + 42 }
|
||||||
|
# 3.14
|
||||||
|
# end
|
||||||
|
# agent.value #=> 45.14
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# Over this:
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# agent = Concurrent::Agent.new(0)
|
||||||
|
# agent.send do |value|
|
||||||
|
# agent.send {|v| v + 42 }
|
||||||
|
# 3.14
|
||||||
|
# end
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
#
|
||||||
|
# **NOTE** Never, *under any circumstances*, call any of the "await" methods
|
||||||
|
# ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action
|
||||||
|
# block/proc/lambda. The call will block the Agent and will always fail.
|
||||||
|
# Calling either {#await} or {#wait} (with a timeout of `nil`) will
|
||||||
|
# hopelessly deadlock the Agent with no possibility of recovery.
|
||||||
|
#
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# @see http://clojure.org/Agents Clojure Agents
|
||||||
|
# @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State
|
||||||
|
class Agent < Synchronization::LockableObject
|
||||||
|
include Concern::Observable
|
||||||
|
|
||||||
|
ERROR_MODES = [:continue, :fail].freeze
|
||||||
|
private_constant :ERROR_MODES
|
||||||
|
|
||||||
|
AWAIT_FLAG = ::Object.new
|
||||||
|
private_constant :AWAIT_FLAG
|
||||||
|
|
||||||
|
AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG }
|
||||||
|
private_constant :AWAIT_ACTION
|
||||||
|
|
||||||
|
DEFAULT_ERROR_HANDLER = ->(agent, error) { nil }
|
||||||
|
private_constant :DEFAULT_ERROR_HANDLER
|
||||||
|
|
||||||
|
DEFAULT_VALIDATOR = ->(value) { true }
|
||||||
|
private_constant :DEFAULT_VALIDATOR
|
||||||
|
|
||||||
|
Job = Struct.new(:action, :args, :executor, :caller)
|
||||||
|
private_constant :Job
|
||||||
|
|
||||||
|
# Raised during action processing or any other time in an Agent's lifecycle.
|
||||||
|
class Error < StandardError
|
||||||
|
def initialize(message = nil)
|
||||||
|
message ||= 'agent must be restarted before jobs can post'
|
||||||
|
super(message)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Raised when a new value obtained during action processing or at `#restart`
|
||||||
|
# fails validation.
|
||||||
|
class ValidationError < Error
|
||||||
|
def initialize(message = nil)
|
||||||
|
message ||= 'invalid value'
|
||||||
|
super(message)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# The error mode this Agent is operating in. See {#initialize} for details.
|
||||||
|
attr_reader :error_mode
|
||||||
|
|
||||||
|
# Create a new `Agent` with the given initial value and options.
|
||||||
|
#
|
||||||
|
# The `:validator` option must be `nil` or a side-effect free proc/lambda
|
||||||
|
# which takes one argument. On any intended value change the validator, if
|
||||||
|
# provided, will be called. If the new value is invalid the validator should
|
||||||
|
# return `false` or raise an error.
|
||||||
|
#
|
||||||
|
# The `:error_handler` option must be `nil` or a proc/lambda which takes two
|
||||||
|
# arguments. When an action raises an error or validation fails, either by
|
||||||
|
# returning false or raising an error, the error handler will be called. The
|
||||||
|
# arguments to the error handler will be a reference to the agent itself and
|
||||||
|
# the error object which was raised.
|
||||||
|
#
|
||||||
|
# The `:error_mode` may be either `:continue` (the default if an error
|
||||||
|
# handler is given) or `:fail` (the default if error handler nil or not
|
||||||
|
# given).
|
||||||
|
#
|
||||||
|
# If an action being run by the agent throws an error or doesn't pass
|
||||||
|
# validation the error handler, if present, will be called. After the
|
||||||
|
# handler executes if the error mode is `:continue` the Agent will continue
|
||||||
|
# as if neither the action that caused the error nor the error itself ever
|
||||||
|
# happened.
|
||||||
|
#
|
||||||
|
# If the mode is `:fail` the Agent will become {#failed?} and will stop
|
||||||
|
# accepting new action dispatches. Any previously queued actions will be
|
||||||
|
# held until {#restart} is called. The {#value} method will still work,
|
||||||
|
# returning the value of the Agent before the error.
|
||||||
|
#
|
||||||
|
# @param [Object] initial the initial value
|
||||||
|
# @param [Hash] opts the configuration options
|
||||||
|
#
|
||||||
|
# @option opts [Symbol] :error_mode either `:continue` or `:fail`
|
||||||
|
# @option opts [nil, Proc] :error_handler the (optional) error handler
|
||||||
|
# @option opts [nil, Proc] :validator the (optional) validation procedure
|
||||||
|
def initialize(initial, opts = {})
|
||||||
|
super()
|
||||||
|
synchronize { ns_initialize(initial, opts) }
|
||||||
|
end
|
||||||
|
|
||||||
|
# The current value (state) of the Agent, irrespective of any pending or
|
||||||
|
# in-progress actions. The value is always available and is non-blocking.
|
||||||
|
#
|
||||||
|
# @return [Object] the current value
|
||||||
|
def value
|
||||||
|
@current.value # TODO (pitr 12-Sep-2015): broken unsafe read?
|
||||||
|
end
|
||||||
|
|
||||||
|
alias_method :deref, :value
|
||||||
|
|
||||||
|
# When {#failed?} and {#error_mode} is `:fail`, returns the error object
|
||||||
|
# which caused the failure, else `nil`. When {#error_mode} is `:continue`
|
||||||
|
# will *always* return `nil`.
|
||||||
|
#
|
||||||
|
# @return [nil, Error] the error which caused the failure when {#failed?}
|
||||||
|
def error
|
||||||
|
@error.value
|
||||||
|
end
|
||||||
|
|
||||||
|
alias_method :reason, :error
|
||||||
|
|
||||||
|
# @!macro agent_send
|
||||||
|
#
|
||||||
|
# Dispatches an action to the Agent and returns immediately. Subsequently,
|
||||||
|
# in a thread from a thread pool, the {#value} will be set to the return
|
||||||
|
# value of the action. Action dispatches are only allowed when the Agent
|
||||||
|
# is not {#failed?}.
|
||||||
|
#
|
||||||
|
# The action must be a block/proc/lambda which takes 1 or more arguments.
|
||||||
|
# The first argument is the current {#value} of the Agent. Any arguments
|
||||||
|
# passed to the send method via the `args` parameter will be passed to the
|
||||||
|
# action as the remaining arguments. The action must return the new value
|
||||||
|
# of the Agent.
|
||||||
|
#
|
||||||
|
# * {#send} and {#send!} should be used for actions that are CPU limited
|
||||||
|
# * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that
|
||||||
|
# may block on IO
|
||||||
|
# * {#send_via} and {#send_via!} are used when a specific executor is to
|
||||||
|
# be used for the action
|
||||||
|
#
|
||||||
|
# @param [Array<Object>] args zero or more arguments to be passed to
|
||||||
|
# the action
|
||||||
|
# @param [Proc] action the action dispatch to be enqueued
|
||||||
|
#
|
||||||
|
# @yield [agent, value, *args] process the old value and return the new
|
||||||
|
# @yieldparam [Object] value the current {#value} of the Agent
|
||||||
|
# @yieldparam [Array<Object>] args zero or more arguments to pass to the
|
||||||
|
# action
|
||||||
|
# @yieldreturn [Object] the new value of the Agent
|
||||||
|
#
|
||||||
|
# @!macro send_return
|
||||||
|
# @return [Boolean] true if the action is successfully enqueued, false if
|
||||||
|
# the Agent is {#failed?}
|
||||||
|
def send(*args, &action)
|
||||||
|
enqueue_action_job(action, args, Concurrent.global_fast_executor)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro agent_send
|
||||||
|
#
|
||||||
|
# @!macro send_bang_return_and_raise
|
||||||
|
# @return [Boolean] true if the action is successfully enqueued
|
||||||
|
# @raise [Concurrent::Agent::Error] if the Agent is {#failed?}
|
||||||
|
def send!(*args, &action)
|
||||||
|
raise Error.new unless send(*args, &action)
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro agent_send
|
||||||
|
# @!macro send_return
|
||||||
|
def send_off(*args, &action)
|
||||||
|
enqueue_action_job(action, args, Concurrent.global_io_executor)
|
||||||
|
end
|
||||||
|
|
||||||
|
alias_method :post, :send_off
|
||||||
|
|
||||||
|
# @!macro agent_send
|
||||||
|
# @!macro send_bang_return_and_raise
|
||||||
|
def send_off!(*args, &action)
|
||||||
|
raise Error.new unless send_off(*args, &action)
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro agent_send
|
||||||
|
# @!macro send_return
|
||||||
|
# @param [Concurrent::ExecutorService] executor the executor on which the
|
||||||
|
# action is to be dispatched
|
||||||
|
def send_via(executor, *args, &action)
|
||||||
|
enqueue_action_job(action, args, executor)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro agent_send
|
||||||
|
# @!macro send_bang_return_and_raise
|
||||||
|
# @param [Concurrent::ExecutorService] executor the executor on which the
|
||||||
|
# action is to be dispatched
|
||||||
|
def send_via!(executor, *args, &action)
|
||||||
|
raise Error.new unless send_via(executor, *args, &action)
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
# Dispatches an action to the Agent and returns immediately. Subsequently,
|
||||||
|
# in a thread from a thread pool, the {#value} will be set to the return
|
||||||
|
# value of the action. Appropriate for actions that may block on IO.
|
||||||
|
#
|
||||||
|
# @param [Proc] action the action dispatch to be enqueued
|
||||||
|
# @return [Concurrent::Agent] self
|
||||||
|
# @see #send_off
|
||||||
|
def <<(action)
|
||||||
|
send_off(&action)
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# Blocks the current thread (indefinitely!) until all actions dispatched
|
||||||
|
# thus far, from this thread or nested by the Agent, have occurred. Will
|
||||||
|
# block when {#failed?}. Will never return if a failed Agent is {#restart}
|
||||||
|
# with `:clear_actions` true.
|
||||||
|
#
|
||||||
|
# Returns a reference to `self` to support method chaining:
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# current_value = agent.await.value
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# @return [Boolean] self
|
||||||
|
#
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
def await
|
||||||
|
wait(nil)
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# Blocks the current thread until all actions dispatched thus far, from this
|
||||||
|
# thread or nested by the Agent, have occurred, or the timeout (in seconds)
|
||||||
|
# has elapsed.
|
||||||
|
#
|
||||||
|
# @param [Float] timeout the maximum number of seconds to wait
|
||||||
|
# @return [Boolean] true if all actions complete before timeout else false
|
||||||
|
#
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
def await_for(timeout)
|
||||||
|
wait(timeout.to_f)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Blocks the current thread until all actions dispatched thus far, from this
|
||||||
|
# thread or nested by the Agent, have occurred, or the timeout (in seconds)
|
||||||
|
# has elapsed.
|
||||||
|
#
|
||||||
|
# @param [Float] timeout the maximum number of seconds to wait
|
||||||
|
# @return [Boolean] true if all actions complete before timeout
|
||||||
|
#
|
||||||
|
# @raise [Concurrent::TimeoutError] when timout is reached
|
||||||
|
#
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
def await_for!(timeout)
|
||||||
|
raise Concurrent::TimeoutError unless wait(timeout.to_f)
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
# Blocks the current thread until all actions dispatched thus far, from this
|
||||||
|
# thread or nested by the Agent, have occurred, or the timeout (in seconds)
|
||||||
|
# has elapsed. Will block indefinitely when timeout is nil or not given.
|
||||||
|
#
|
||||||
|
# Provided mainly for consistency with other classes in this library. Prefer
|
||||||
|
# the various `await` methods instead.
|
||||||
|
#
|
||||||
|
# @param [Float] timeout the maximum number of seconds to wait
|
||||||
|
# @return [Boolean] true if all actions complete before timeout else false
|
||||||
|
#
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
def wait(timeout = nil)
|
||||||
|
latch = Concurrent::CountDownLatch.new(1)
|
||||||
|
enqueue_await_job(latch)
|
||||||
|
latch.wait(timeout)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Is the Agent in a failed state?
|
||||||
|
#
|
||||||
|
# @see #restart
|
||||||
|
def failed?
|
||||||
|
!@error.value.nil?
|
||||||
|
end
|
||||||
|
|
||||||
|
alias_method :stopped?, :failed?
|
||||||
|
|
||||||
|
# When an Agent is {#failed?}, changes the Agent {#value} to `new_value`
|
||||||
|
# then un-fails the Agent so that action dispatches are allowed again. If
|
||||||
|
# the `:clear_actions` option is give and true, any actions queued on the
|
||||||
|
# Agent that were being held while it was failed will be discarded,
|
||||||
|
# otherwise those held actions will proceed. The `new_value` must pass the
|
||||||
|
# validator if any, or `restart` will raise an exception and the Agent will
|
||||||
|
# remain failed with its old {#value} and {#error}. Observers, if any, will
|
||||||
|
# not be notified of the new state.
|
||||||
|
#
|
||||||
|
# @param [Object] new_value the new value for the Agent once restarted
|
||||||
|
# @param [Hash] opts the configuration options
|
||||||
|
# @option opts [Symbol] :clear_actions true if all enqueued but unprocessed
|
||||||
|
# actions should be discarded on restart, else false (default: false)
|
||||||
|
# @return [Boolean] true
|
||||||
|
#
|
||||||
|
# @raise [Concurrent:AgentError] when not failed
|
||||||
|
def restart(new_value, opts = {})
|
||||||
|
clear_actions = opts.fetch(:clear_actions, false)
|
||||||
|
synchronize do
|
||||||
|
raise Error.new('agent is not failed') unless failed?
|
||||||
|
raise ValidationError unless ns_validate(new_value)
|
||||||
|
@current.value = new_value
|
||||||
|
@error.value = nil
|
||||||
|
@queue.clear if clear_actions
|
||||||
|
ns_post_next_job unless @queue.empty?
|
||||||
|
end
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
class << self
|
||||||
|
|
||||||
|
# Blocks the current thread (indefinitely!) until all actions dispatched
|
||||||
|
# thus far to all the given Agents, from this thread or nested by the
|
||||||
|
# given Agents, have occurred. Will block when any of the agents are
|
||||||
|
# failed. Will never return if a failed Agent is restart with
|
||||||
|
# `:clear_actions` true.
|
||||||
|
#
|
||||||
|
# @param [Array<Concurrent::Agent>] agents the Agents on which to wait
|
||||||
|
# @return [Boolean] true
|
||||||
|
#
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
def await(*agents)
|
||||||
|
agents.each { |agent| agent.await }
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
# Blocks the current thread until all actions dispatched thus far to all
|
||||||
|
# the given Agents, from this thread or nested by the given Agents, have
|
||||||
|
# occurred, or the timeout (in seconds) has elapsed.
|
||||||
|
#
|
||||||
|
# @param [Float] timeout the maximum number of seconds to wait
|
||||||
|
# @param [Array<Concurrent::Agent>] agents the Agents on which to wait
|
||||||
|
# @return [Boolean] true if all actions complete before timeout else false
|
||||||
|
#
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
def await_for(timeout, *agents)
|
||||||
|
end_at = Concurrent.monotonic_time + timeout.to_f
|
||||||
|
ok = agents.length.times do |i|
|
||||||
|
break false if (delay = end_at - Concurrent.monotonic_time) < 0
|
||||||
|
break false unless agents[i].await_for(delay)
|
||||||
|
end
|
||||||
|
!!ok
|
||||||
|
end
|
||||||
|
|
||||||
|
# Blocks the current thread until all actions dispatched thus far to all
|
||||||
|
# the given Agents, from this thread or nested by the given Agents, have
|
||||||
|
# occurred, or the timeout (in seconds) has elapsed.
|
||||||
|
#
|
||||||
|
# @param [Float] timeout the maximum number of seconds to wait
|
||||||
|
# @param [Array<Concurrent::Agent>] agents the Agents on which to wait
|
||||||
|
# @return [Boolean] true if all actions complete before timeout
|
||||||
|
#
|
||||||
|
# @raise [Concurrent::TimeoutError] when timout is reached
|
||||||
|
# @!macro agent_await_warning
|
||||||
|
def await_for!(timeout, *agents)
|
||||||
|
raise Concurrent::TimeoutError unless await_for(timeout, *agents)
|
||||||
|
true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def ns_initialize(initial, opts)
|
||||||
|
@error_mode = opts[:error_mode]
|
||||||
|
@error_handler = opts[:error_handler]
|
||||||
|
|
||||||
|
if @error_mode && !ERROR_MODES.include?(@error_mode)
|
||||||
|
raise ArgumentError.new('unrecognized error mode')
|
||||||
|
elsif @error_mode.nil?
|
||||||
|
@error_mode = @error_handler ? :continue : :fail
|
||||||
|
end
|
||||||
|
|
||||||
|
@error_handler ||= DEFAULT_ERROR_HANDLER
|
||||||
|
@validator = opts.fetch(:validator, DEFAULT_VALIDATOR)
|
||||||
|
@current = Concurrent::AtomicReference.new(initial)
|
||||||
|
@error = Concurrent::AtomicReference.new(nil)
|
||||||
|
@caller = Concurrent::ThreadLocalVar.new(nil)
|
||||||
|
@queue = []
|
||||||
|
|
||||||
|
self.observers = Collection::CopyOnNotifyObserverSet.new
|
||||||
|
end
|
||||||
|
|
||||||
|
def enqueue_action_job(action, args, executor)
|
||||||
|
raise ArgumentError.new('no action given') unless action
|
||||||
|
job = Job.new(action, args, executor, @caller.value || Thread.current.object_id)
|
||||||
|
synchronize { ns_enqueue_job(job) }
|
||||||
|
end
|
||||||
|
|
||||||
|
def enqueue_await_job(latch)
|
||||||
|
synchronize do
|
||||||
|
if (index = ns_find_last_job_for_thread)
|
||||||
|
job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor,
|
||||||
|
Thread.current.object_id)
|
||||||
|
ns_enqueue_job(job, index+1)
|
||||||
|
else
|
||||||
|
latch.count_down
|
||||||
|
true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_enqueue_job(job, index = nil)
|
||||||
|
# a non-nil index means this is an await job
|
||||||
|
return false if index.nil? && failed?
|
||||||
|
index ||= @queue.length
|
||||||
|
@queue.insert(index, job)
|
||||||
|
# if this is the only job, post to executor
|
||||||
|
ns_post_next_job if @queue.length == 1
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_post_next_job
|
||||||
|
@queue.first.executor.post { execute_next_job }
|
||||||
|
end
|
||||||
|
|
||||||
|
def execute_next_job
|
||||||
|
job = synchronize { @queue.first }
|
||||||
|
old_value = @current.value
|
||||||
|
|
||||||
|
@caller.value = job.caller # for nested actions
|
||||||
|
new_value = job.action.call(old_value, *job.args)
|
||||||
|
@caller.value = nil
|
||||||
|
|
||||||
|
return if new_value == AWAIT_FLAG
|
||||||
|
|
||||||
|
if ns_validate(new_value)
|
||||||
|
@current.value = new_value
|
||||||
|
observers.notify_observers(Time.now, old_value, new_value)
|
||||||
|
else
|
||||||
|
handle_error(ValidationError.new)
|
||||||
|
end
|
||||||
|
rescue => error
|
||||||
|
handle_error(error)
|
||||||
|
ensure
|
||||||
|
synchronize do
|
||||||
|
@queue.shift
|
||||||
|
unless failed? || @queue.empty?
|
||||||
|
ns_post_next_job
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_validate(value)
|
||||||
|
@validator.call(value)
|
||||||
|
rescue
|
||||||
|
false
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_error(error)
|
||||||
|
# stop new jobs from posting
|
||||||
|
@error.value = error if @error_mode == :fail
|
||||||
|
@error_handler.call(self, error)
|
||||||
|
rescue
|
||||||
|
# do nothing
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_find_last_job_for_thread
|
||||||
|
@queue.rindex { |job| job.caller == Thread.current.object_id }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,56 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/thread_safe/util'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro concurrent_array
|
||||||
|
#
|
||||||
|
# A thread-safe subclass of Array. This version locks against the object
|
||||||
|
# itself for every method call, ensuring only one thread can be reading
|
||||||
|
# or writing at a time. This includes iteration methods like `#each`.
|
||||||
|
#
|
||||||
|
# @note `a += b` is **not** a **thread-safe** operation on
|
||||||
|
# `Concurrent::Array`. It reads array `a`, then it creates new `Concurrent::Array`
|
||||||
|
# which is concatenation of `a` and `b`, then it writes the concatenation to `a`.
|
||||||
|
# The read and write are independent operations they do not form a single atomic
|
||||||
|
# operation therefore when two `+=` operations are executed concurrently updates
|
||||||
|
# may be lost. Use `#concat` instead.
|
||||||
|
#
|
||||||
|
# @see http://ruby-doc.org/core/Array.html Ruby standard library `Array`
|
||||||
|
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
ArrayImplementation = case
|
||||||
|
when Concurrent.on_cruby?
|
||||||
|
# Array is not fully thread-safe on CRuby, see
|
||||||
|
# https://github.com/ruby-concurrency/concurrent-ruby/issues/929
|
||||||
|
# So we will need to add synchronization here
|
||||||
|
::Array
|
||||||
|
|
||||||
|
when Concurrent.on_jruby?
|
||||||
|
require 'jruby/synchronized'
|
||||||
|
|
||||||
|
class JRubyArray < ::Array
|
||||||
|
include JRuby::Synchronized
|
||||||
|
end
|
||||||
|
JRubyArray
|
||||||
|
|
||||||
|
when Concurrent.on_truffleruby?
|
||||||
|
require 'concurrent/thread_safe/util/data_structures'
|
||||||
|
|
||||||
|
class TruffleRubyArray < ::Array
|
||||||
|
end
|
||||||
|
|
||||||
|
ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray
|
||||||
|
TruffleRubyArray
|
||||||
|
|
||||||
|
else
|
||||||
|
warn 'Possibly unsupported Ruby implementation'
|
||||||
|
::Array
|
||||||
|
end
|
||||||
|
private_constant :ArrayImplementation
|
||||||
|
|
||||||
|
# @!macro concurrent_array
|
||||||
|
class Array < ArrayImplementation
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
@ -0,0 +1,449 @@
|
|||||||
|
require 'concurrent/configuration'
|
||||||
|
require 'concurrent/ivar'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A mixin module that provides simple asynchronous behavior to a class,
|
||||||
|
# turning it into a simple actor. Loosely based on Erlang's
|
||||||
|
# [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without
|
||||||
|
# supervision or linking.
|
||||||
|
#
|
||||||
|
# A more feature-rich {Concurrent::Actor} is also available when the
|
||||||
|
# capabilities of `Async` are too limited.
|
||||||
|
#
|
||||||
|
# ```cucumber
|
||||||
|
# Feature:
|
||||||
|
# As a stateful, plain old Ruby class
|
||||||
|
# I want safe, asynchronous behavior
|
||||||
|
# So my long-running methods don't block the main thread
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# The `Async` module is a way to mix simple yet powerful asynchronous
|
||||||
|
# capabilities into any plain old Ruby object or class, turning each object
|
||||||
|
# into a simple Actor. Method calls are processed on a background thread. The
|
||||||
|
# caller is free to perform other actions while processing occurs in the
|
||||||
|
# background.
|
||||||
|
#
|
||||||
|
# Method calls to the asynchronous object are made via two proxy methods:
|
||||||
|
# `async` (alias `cast`) and `await` (alias `call`). These proxy methods post
|
||||||
|
# the method call to the object's background thread and return a "future"
|
||||||
|
# which will eventually contain the result of the method call.
|
||||||
|
#
|
||||||
|
# This behavior is loosely patterned after Erlang's `gen_server` behavior.
|
||||||
|
# When an Erlang module implements the `gen_server` behavior it becomes
|
||||||
|
# inherently asynchronous. The `start` or `start_link` function spawns a
|
||||||
|
# process (similar to a thread but much more lightweight and efficient) and
|
||||||
|
# returns the ID of the process. Using the process ID, other processes can
|
||||||
|
# send messages to the `gen_server` via the `cast` and `call` methods. Unlike
|
||||||
|
# Erlang's `gen_server`, however, `Async` classes do not support linking or
|
||||||
|
# supervision trees.
|
||||||
|
#
|
||||||
|
# ## Basic Usage
|
||||||
|
#
|
||||||
|
# When this module is mixed into a class, objects of the class become inherently
|
||||||
|
# asynchronous. Each object gets its own background thread on which to post
|
||||||
|
# asynchronous method calls. Asynchronous method calls are executed in the
|
||||||
|
# background one at a time in the order they are received.
|
||||||
|
#
|
||||||
|
# To create an asynchronous class, simply mix in the `Concurrent::Async` module:
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# class Hello
|
||||||
|
# include Concurrent::Async
|
||||||
|
#
|
||||||
|
# def hello(name)
|
||||||
|
# "Hello, #{name}!"
|
||||||
|
# end
|
||||||
|
# end
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# Mixing this module into a class provides each object two proxy methods:
|
||||||
|
# `async` and `await`. These methods are thread safe with respect to the
|
||||||
|
# enclosing object. The former proxy allows methods to be called
|
||||||
|
# asynchronously by posting to the object's internal thread. The latter proxy
|
||||||
|
# allows a method to be called synchronously but does so safely with respect
|
||||||
|
# to any pending asynchronous method calls and ensures proper ordering. Both
|
||||||
|
# methods return a {Concurrent::IVar} which can be inspected for the result
|
||||||
|
# of the proxied method call. Calling a method with `async` will return a
|
||||||
|
# `:pending` `IVar` whereas `await` will return a `:complete` `IVar`.
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# class Echo
|
||||||
|
# include Concurrent::Async
|
||||||
|
#
|
||||||
|
# def echo(msg)
|
||||||
|
# print "#{msg}\n"
|
||||||
|
# end
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# horn = Echo.new
|
||||||
|
# horn.echo('zero') # synchronous, not thread-safe
|
||||||
|
# # returns the actual return value of the method
|
||||||
|
#
|
||||||
|
# horn.async.echo('one') # asynchronous, non-blocking, thread-safe
|
||||||
|
# # returns an IVar in the :pending state
|
||||||
|
#
|
||||||
|
# horn.await.echo('two') # synchronous, blocking, thread-safe
|
||||||
|
# # returns an IVar in the :complete state
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# ## Let It Fail
|
||||||
|
#
|
||||||
|
# The `async` and `await` proxy methods have built-in error protection based
|
||||||
|
# on Erlang's famous "let it fail" philosophy. Instance methods should not be
|
||||||
|
# programmed defensively. When an exception is raised by a delegated method
|
||||||
|
# the proxy will rescue the exception, expose it to the caller as the `reason`
|
||||||
|
# attribute of the returned future, then process the next method call.
|
||||||
|
#
|
||||||
|
# ## Calling Methods Internally
|
||||||
|
#
|
||||||
|
# External method calls should *always* use the `async` and `await` proxy
|
||||||
|
# methods. When one method calls another method, the `async` proxy should
|
||||||
|
# rarely be used and the `await` proxy should *never* be used.
|
||||||
|
#
|
||||||
|
# When an object calls one of its own methods using the `await` proxy the
|
||||||
|
# second call will be enqueued *behind* the currently running method call.
|
||||||
|
# Any attempt to wait on the result will fail as the second call will never
|
||||||
|
# run until after the current call completes.
|
||||||
|
#
|
||||||
|
# Calling a method using the `await` proxy from within a method that was
|
||||||
|
# itself called using `async` or `await` will irreversibly deadlock the
|
||||||
|
# object. Do *not* do this, ever.
|
||||||
|
#
|
||||||
|
# ## Instance Variables and Attribute Accessors
|
||||||
|
#
|
||||||
|
# Instance variables do not need to be thread-safe so long as they are private.
|
||||||
|
# Asynchronous method calls are processed in the order they are received and
|
||||||
|
# are processed one at a time. Therefore private instance variables can only
|
||||||
|
# be accessed by one thread at a time. This is inherently thread-safe.
|
||||||
|
#
|
||||||
|
# When using private instance variables within asynchronous methods, the best
|
||||||
|
# practice is to read the instance variable into a local variable at the start
|
||||||
|
# of the method then update the instance variable at the *end* of the method.
|
||||||
|
# This way, should an exception be raised during method execution the internal
|
||||||
|
# state of the object will not have been changed.
|
||||||
|
#
|
||||||
|
# ### Reader Attributes
|
||||||
|
#
|
||||||
|
# The use of `attr_reader` is discouraged. Internal state exposed externally,
|
||||||
|
# when necessary, should be done through accessor methods. The instance
|
||||||
|
# variables exposed by these methods *must* be thread-safe, or they must be
|
||||||
|
# called using the `async` and `await` proxy methods. These two approaches are
|
||||||
|
# subtly different.
|
||||||
|
#
|
||||||
|
# When internal state is accessed via the `async` and `await` proxy methods,
|
||||||
|
# the returned value represents the object's state *at the time the call is
|
||||||
|
# processed*, which may *not* be the state of the object at the time the call
|
||||||
|
# is made.
|
||||||
|
#
|
||||||
|
# To get the state *at the current* time, irrespective of an enqueued method
|
||||||
|
# calls, a reader method must be called directly. This is inherently unsafe
|
||||||
|
# unless the instance variable is itself thread-safe, preferably using one
|
||||||
|
# of the thread-safe classes within this library. Because the thread-safe
|
||||||
|
# classes within this library are internally-locking or non-locking, they can
|
||||||
|
# be safely used from within asynchronous methods without causing deadlocks.
|
||||||
|
#
|
||||||
|
# Generally speaking, the best practice is to *not* expose internal state via
|
||||||
|
# reader methods. The best practice is to simply use the method's return value.
|
||||||
|
#
|
||||||
|
# ### Writer Attributes
|
||||||
|
#
|
||||||
|
# Writer attributes should never be used with asynchronous classes. Changing
|
||||||
|
# the state externally, even when done in the thread-safe way, is not logically
|
||||||
|
# consistent. Changes to state need to be timed with respect to all asynchronous
|
||||||
|
# method calls which my be in-process or enqueued. The only safe practice is to
|
||||||
|
# pass all necessary data to each method as arguments and let the method update
|
||||||
|
# the internal state as necessary.
|
||||||
|
#
|
||||||
|
# ## Class Constants, Variables, and Methods
|
||||||
|
#
|
||||||
|
# ### Class Constants
|
||||||
|
#
|
||||||
|
# Class constants do not need to be thread-safe. Since they are read-only and
|
||||||
|
# immutable they may be safely read both externally and from within
|
||||||
|
# asynchronous methods.
|
||||||
|
#
|
||||||
|
# ### Class Variables
|
||||||
|
#
|
||||||
|
# Class variables should be avoided. Class variables represent shared state.
|
||||||
|
# Shared state is anathema to concurrency. Should there be a need to share
|
||||||
|
# state using class variables they *must* be thread-safe, preferably
|
||||||
|
# using the thread-safe classes within this library. When updating class
|
||||||
|
# variables, never assign a new value/object to the variable itself. Assignment
|
||||||
|
# is not thread-safe in Ruby. Instead, use the thread-safe update functions
|
||||||
|
# of the variable itself to change the value.
|
||||||
|
#
|
||||||
|
# The best practice is to *never* use class variables with `Async` classes.
|
||||||
|
#
|
||||||
|
# ### Class Methods
|
||||||
|
#
|
||||||
|
# Class methods which are pure functions are safe. Class methods which modify
|
||||||
|
# class variables should be avoided, for all the reasons listed above.
|
||||||
|
#
|
||||||
|
# ## An Important Note About Thread Safe Guarantees
|
||||||
|
#
|
||||||
|
# > Thread safe guarantees can only be made when asynchronous method calls
|
||||||
|
# > are not mixed with direct method calls. Use only direct method calls
|
||||||
|
# > when the object is used exclusively on a single thread. Use only
|
||||||
|
# > `async` and `await` when the object is shared between threads. Once you
|
||||||
|
# > call a method using `async` or `await`, you should no longer call methods
|
||||||
|
# > directly on the object. Use `async` and `await` exclusively from then on.
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
#
|
||||||
|
# class Echo
|
||||||
|
# include Concurrent::Async
|
||||||
|
#
|
||||||
|
# def echo(msg)
|
||||||
|
# print "#{msg}\n"
|
||||||
|
# end
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# horn = Echo.new
|
||||||
|
# horn.echo('zero') # synchronous, not thread-safe
|
||||||
|
# # returns the actual return value of the method
|
||||||
|
#
|
||||||
|
# horn.async.echo('one') # asynchronous, non-blocking, thread-safe
|
||||||
|
# # returns an IVar in the :pending state
|
||||||
|
#
|
||||||
|
# horn.await.echo('two') # synchronous, blocking, thread-safe
|
||||||
|
# # returns an IVar in the :complete state
|
||||||
|
#
|
||||||
|
# @see Concurrent::Actor
|
||||||
|
# @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia
|
||||||
|
# @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server
|
||||||
|
# @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/
|
||||||
|
module Async
|
||||||
|
|
||||||
|
# @!method self.new(*args, &block)
|
||||||
|
#
|
||||||
|
# Instanciate a new object and ensure proper initialization of the
|
||||||
|
# synchronization mechanisms.
|
||||||
|
#
|
||||||
|
# @param [Array<Object>] args Zero or more arguments to be passed to the
|
||||||
|
# object's initializer.
|
||||||
|
# @param [Proc] block Optional block to pass to the object's initializer.
|
||||||
|
# @return [Object] A properly initialized object of the asynchronous class.
|
||||||
|
|
||||||
|
# Check for the presence of a method on an object and determine if a given
|
||||||
|
# set of arguments matches the required arity.
|
||||||
|
#
|
||||||
|
# @param [Object] obj the object to check against
|
||||||
|
# @param [Symbol] method the method to check the object for
|
||||||
|
# @param [Array] args zero or more arguments for the arity check
|
||||||
|
#
|
||||||
|
# @raise [NameError] the object does not respond to `method` method
|
||||||
|
# @raise [ArgumentError] the given `args` do not match the arity of `method`
|
||||||
|
#
|
||||||
|
# @note This check is imperfect because of the way Ruby reports the arity of
|
||||||
|
# methods with a variable number of arguments. It is possible to determine
|
||||||
|
# if too few arguments are given but impossible to determine if too many
|
||||||
|
# arguments are given. This check may also fail to recognize dynamic behavior
|
||||||
|
# of the object, such as methods simulated with `method_missing`.
|
||||||
|
#
|
||||||
|
# @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity
|
||||||
|
# @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to?
|
||||||
|
# @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def self.validate_argc(obj, method, *args)
|
||||||
|
argc = args.length
|
||||||
|
arity = obj.method(method).arity
|
||||||
|
|
||||||
|
if arity >= 0 && argc != arity
|
||||||
|
raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})")
|
||||||
|
elsif arity < 0 && (arity = (arity + 1).abs) > argc
|
||||||
|
raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def self.included(base)
|
||||||
|
base.singleton_class.send(:alias_method, :original_new, :new)
|
||||||
|
base.extend(ClassMethods)
|
||||||
|
super(base)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
module ClassMethods
|
||||||
|
def new(*args, &block)
|
||||||
|
obj = original_new(*args, &block)
|
||||||
|
obj.send(:init_synchronization)
|
||||||
|
obj
|
||||||
|
end
|
||||||
|
ruby2_keywords :new if respond_to?(:ruby2_keywords, true)
|
||||||
|
end
|
||||||
|
private_constant :ClassMethods
|
||||||
|
|
||||||
|
# Delegates asynchronous, thread-safe method calls to the wrapped object.
#
# @!visibility private
class AsyncDelegator < Synchronization::LockableObject
  safe_initialization!

  # Wrap +delegate+ with an empty call queue serviced by the global IO
  # executor.
  #
  # @param [Object] delegate the object to wrap and delegate method calls to
  def initialize(delegate)
    super()
    @delegate = delegate
    @queue    = []
    @executor = Concurrent.global_io_executor
    @ruby_pid = $$ # remembered so a fork can be detected and the queue reset
  end

  # Enqueues a call to the wrapped object and returns immediately.
  #
  # @param [Symbol] name the method being called
  # @param [Array] args zero or more arguments to the method
  #
  # @return [IVar] the pending result of the method call
  #
  # @raise [NameError] the object does not respond to the named method
  # @raise [ArgumentError] the given +args+ do not match the method's arity
  def method_missing(name, *args, &block)
    super unless @delegate.respond_to?(name)
    Async::validate_argc(@delegate, name, *args)

    result = Concurrent::IVar.new
    synchronize do
      reset_if_forked
      @queue.push [result, name, args, block]
      # Only the first enqueued call schedules a drain; `perform` keeps
      # running until the queue is empty again.
      @executor.post { perform } if @queue.length == 1
    end

    result
  end

  # Check whether the wrapped object can handle the method.
  #
  # @param [Symbol] name the method being queried
  def respond_to_missing?(name, include_private = false)
    @delegate.respond_to?(name) || super
  end

  # Drain the queue one call at a time, resolving each call's ivar with the
  # result or the raised error.
  #
  # Must be called from within the executor and must not be re-entered;
  # loops until the queue is empty.
  def perform
    loop do
      ivar, name, args, block = synchronize { @queue.first }
      break unless ivar # queue is empty

      begin
        ivar.set(@delegate.send(name, *args, &block))
      rescue => e
        ivar.fail(e)
      end

      synchronize do
        @queue.shift
        return if @queue.empty?
      end
    end
  end

  # Discard queued work inherited across a fork; the executor threads that
  # would have drained it do not survive into the child process.
  def reset_if_forked
    return if $$ == @ruby_pid
    @queue.clear
    @ruby_pid = $$
  end
end
|
||||||
|
private_constant :AsyncDelegator
|
||||||
|
|
||||||
|
# Delegates synchronous, thread-safe method calls to the wrapped object.
#
# @!visibility private
class AwaitDelegator
  # Create a new delegator wrapping the given delegate.
  #
  # @param [AsyncDelegator] delegate the object to wrap and delegate method calls to
  def initialize(delegate)
    @delegate = delegate
  end

  # Forwards the call to the wrapped delegator, then blocks until the
  # returned ivar has been resolved.
  #
  # @param [Symbol] name the method being called
  # @param [Array] args zero or more arguments to the method
  #
  # @return [IVar] the completed result of the method call
  #
  # @raise [NameError] the object does not respond to the named method
  # @raise [ArgumentError] the given +args+ do not match the method's arity
  def method_missing(name, *args, &block)
    ivar = @delegate.send(name, *args, &block)
    ivar.wait
    ivar
  end

  # Check whether the wrapped object can handle the method.
  #
  # @param [Symbol] name the method being queried
  def respond_to_missing?(name, include_private = false)
    @delegate.respond_to?(name) || super
  end
end
|
||||||
|
private_constant :AwaitDelegator
|
||||||
|
|
||||||
|
# Causes the chained method call to be performed asynchronously on the
|
||||||
|
# object's thread. The delegated method will return a future in the
|
||||||
|
# `:pending` state and the method call will have been scheduled on the
|
||||||
|
# object's thread. The final disposition of the method call can be obtained
|
||||||
|
# by inspecting the returned future.
|
||||||
|
#
|
||||||
|
# @!macro async_thread_safety_warning
|
||||||
|
# @note The method call is guaranteed to be thread safe with respect to
|
||||||
|
# all other method calls against the same object that are called with
|
||||||
|
# either `async` or `await`. The mutable nature of Ruby references
|
||||||
|
# (and object orientation in general) prevent any other thread safety
|
||||||
|
# guarantees. Do NOT mix direct method calls with delegated method calls.
|
||||||
|
# Use *only* delegated method calls when sharing the object between threads.
|
||||||
|
#
|
||||||
|
# @return [Concurrent::IVar] the pending result of the asynchronous operation
|
||||||
|
#
|
||||||
|
# @raise [NameError] the object does not respond to the requested method
|
||||||
|
# @raise [ArgumentError] the given `args` do not match the arity of
|
||||||
|
# the requested method
|
||||||
|
def async
  @__async_delegator__ # set once by #init_synchronization
end
alias cast async
|
||||||
|
|
||||||
|
# Causes the chained method call to be performed synchronously on the
|
||||||
|
# current thread. The delegated method will return a future in either the
|
||||||
|
# `:fulfilled` or `:rejected` state and the delegated method will have
|
||||||
|
# completed. The final disposition of the delegated method can be obtained
|
||||||
|
# by inspecting the returned future.
|
||||||
|
#
|
||||||
|
# @!macro async_thread_safety_warning
|
||||||
|
#
|
||||||
|
# @return [Concurrent::IVar] the completed result of the synchronous operation
|
||||||
|
#
|
||||||
|
# @raise [NameError] the object does not respond to the requested method
|
||||||
|
# @raise [ArgumentError] the given `args` do not match the arity of the
|
||||||
|
# requested method
|
||||||
|
def await
  @__await_delegator__ # set once by #init_synchronization
end
alias call await
|
||||||
|
|
||||||
|
# Initialize the internal serializer and other synchronization mechanisms.
|
||||||
|
#
|
||||||
|
# @note This method *must* be called immediately upon object construction.
|
||||||
|
# This is the only way thread-safe initialization can be guaranteed.
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def init_synchronization
  # Run at most once per object, even if called again explicitly.
  return self if defined?(@__async_initialized__) && @__async_initialized__
  @__async_initialized__ = true

  # The await delegator funnels through the async delegator so both paths
  # share a single serialized call queue.
  @__async_delegator__ = AsyncDelegator.new(self)
  @__await_delegator__ = AwaitDelegator.new(@__async_delegator__)
  self
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,222 @@
|
|||||||
|
require 'concurrent/atomic/atomic_reference'
|
||||||
|
require 'concurrent/collection/copy_on_notify_observer_set'
|
||||||
|
require 'concurrent/concern/observable'
|
||||||
|
require 'concurrent/synchronization/object'
|
||||||
|
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# ## Thread-safe Variable Classes
|
||||||
|
#
|
||||||
|
# Each of the thread-safe variable classes is designed to solve a different
|
||||||
|
# problem. In general:
|
||||||
|
#
|
||||||
|
# * *{Concurrent::Agent}:* Shared, mutable variable providing independent,
|
||||||
|
# uncoordinated, *asynchronous* change of individual values. Best used when
|
||||||
|
# the value will undergo frequent, complex updates. Suitable when the result
|
||||||
|
# of an update does not need to be known immediately.
|
||||||
|
# * *{Concurrent::Atom}:* Shared, mutable variable providing independent,
|
||||||
|
# uncoordinated, *synchronous* change of individual values. Best used when
|
||||||
|
# the value will undergo frequent reads but only occasional, though complex,
|
||||||
|
# updates. Suitable when the result of an update must be known immediately.
|
||||||
|
# * *{Concurrent::AtomicReference}:* A simple object reference that can be updated
|
||||||
|
# atomically. Updates are synchronous but fast. Best used when updates are
|
||||||
|
# simple set operations. Not suitable when updates are complex.
|
||||||
|
# {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar
|
||||||
|
# but optimized for the given data type.
|
||||||
|
# * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used
|
||||||
|
# when two or more threads need to exchange data. The threads will pair then
|
||||||
|
# block on each other until the exchange is complete.
|
||||||
|
# * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread
|
||||||
|
# must give a value to another, which must take the value. The threads will
|
||||||
|
# block on each other until the exchange is complete.
|
||||||
|
# * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which
|
||||||
|
# holds a different value for each thread which has access. Often used as
|
||||||
|
# an instance variable in objects which must maintain different state
|
||||||
|
# for different threads.
|
||||||
|
# * *{Concurrent::TVar}:* Shared, mutable variables which provide
|
||||||
|
# *coordinated*, *synchronous*, change of *many* states. Used when multiple
|
||||||
|
# values must change together, in an all-or-nothing transaction.
|
||||||
|
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Atoms provide a way to manage shared, synchronous, independent state.
|
||||||
|
#
|
||||||
|
# An atom is initialized with an initial value and an optional validation
|
||||||
|
# proc. At any time the value of the atom can be synchronously and safely
|
||||||
|
# changed. If a validator is given at construction then any new value
|
||||||
|
# will be checked against the validator and will be rejected if the
|
||||||
|
# validator returns false or raises an exception.
|
||||||
|
#
|
||||||
|
# There are two ways to change the value of an atom: {#compare_and_set} and
|
||||||
|
# {#swap}. The former will set the new value if and only if it validates and
|
||||||
|
# the current value matches the new value. The latter will atomically set the
|
||||||
|
# new value to the result of running the given block if and only if that
|
||||||
|
# value validates.
|
||||||
|
#
|
||||||
|
# ## Example
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# def next_fibonacci(set = nil)
|
||||||
|
# return [0, 1] if set.nil?
|
||||||
|
# set + [set[-2..-1].reduce{|sum,x| sum + x }]
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# # create an atom with an initial value
|
||||||
|
# atom = Concurrent::Atom.new(next_fibonacci)
|
||||||
|
#
|
||||||
|
# # send a few update requests
|
||||||
|
# 5.times do
|
||||||
|
# atom.swap{|set| next_fibonacci(set) }
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# # get the current value
|
||||||
|
# atom.value #=> [0, 1, 1, 2, 3, 5, 8]
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# ## Observation
|
||||||
|
#
|
||||||
|
# Atoms support observers through the {Concurrent::Observable} mixin module.
|
||||||
|
# Notification of observers occurs every time the value of the Atom changes.
|
||||||
|
# When notified the observer will receive three arguments: `time`, `old_value`,
|
||||||
|
# and `new_value`. The `time` argument is the time at which the value change
|
||||||
|
# occurred. The `old_value` is the value of the Atom when the change began
|
||||||
|
# The `new_value` is the value to which the Atom was set when the change
|
||||||
|
# completed. Note that `old_value` and `new_value` may be the same. This is
|
||||||
|
# not an error. It simply means that the change operation returned the same
|
||||||
|
# value.
|
||||||
|
#
|
||||||
|
# Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions.
|
||||||
|
#
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# @see http://clojure.org/atoms Clojure Atoms
|
||||||
|
# @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State
|
||||||
|
class Atom < Synchronization::Object
  include Concern::Observable

  safe_initialization!
  attr_atomic(:value)
  private :value=, :swap_value, :compare_and_set_value, :update_value
  public :value
  alias_method :deref, :value

  # @!method value
  #   The current value of the atom.
  #
  #   @return [Object] The current value.

  # Create a new atom with the given initial value.
  #
  # @param [Object] value The initial value
  # @param [Hash] opts The options used to configure the atom
  # @option opts [Proc] :validator (nil) Optional proc used to validate new
  #   values. It must accept one and only one argument which will be the
  #   intended new value. The validator will return true if the new value
  #   is acceptable else return false (preferably) or raise an exception.
  #
  # @!macro deref_options
  #
  # @raise [ArgumentError] if the validator is not a `Proc` (when given)
  def initialize(value, opts = {})
    super()
    # Capitalized ivar follows the library's write-once "final" convention.
    @Validator = opts.fetch(:validator, -> v { true })
    self.observers = Collection::CopyOnNotifyObserverSet.new
    self.value = value
  end

  # Atomically swaps the value of atom using the given block. The current
  # value and any supplied arguments are passed to the block; the block's
  # result becomes the new value if it validates. Internally this reads the
  # current value, applies the block, and retries the compare-and-set in a
  # spin loop until it wins the race.
  #
  # @note The given block may be called multiple times, and thus should be
  #   free of side effects.
  #
  # @param [Object] args Zero or more arguments passed to the block.
  #
  # @yield [value, args] Calculates a new value for the atom based on the
  #   current value and any supplied arguments.
  # @yieldparam value [Object] The current value of the atom.
  # @yieldparam args [Object] All arguments passed to the function, in order.
  # @yieldreturn [Object] The intended new value of the atom.
  #
  # @return [Object] The final value of the atom after all operations and
  #   validations are complete.
  #
  # @raise [ArgumentError] When no block is given.
  def swap(*args)
    raise ArgumentError, 'no block given' unless block_given?

    loop do
      current  = value
      proposed = yield(current, *args)
      begin
        # An invalid proposal leaves the atom untouched.
        break current unless valid?(proposed)
        break proposed if compare_and_set(current, proposed)
      rescue
        break current
      end
    end
  end

  # Atomically sets the value of atom to the new value if and only if the
  # current value is identical to the old value and the new value validates.
  #
  # @param [Object] old_value The expected current value.
  # @param [Object] new_value The intended new value.
  #
  # @return [Boolean] True if the value is changed else false.
  def compare_and_set(old_value, new_value)
    return false unless valid?(new_value)
    return false unless compare_and_set_value(old_value, new_value)

    observers.notify_observers(Time.now, old_value, new_value)
    true
  end

  # Atomically sets the value of atom to the new value without regard for
  # the current value, so long as the new value validates.
  #
  # @param [Object] new_value The intended new value.
  #
  # @return [Object] The final value of the atom after all operations and
  #   validations are complete.
  def reset(new_value)
    previous = value
    return previous unless valid?(new_value)

    self.value = new_value
    observers.notify_observers(Time.now, previous, new_value)
    new_value
  end

  private

  # Is the new value valid?
  #
  # @param [Object] new_value The intended new value.
  # @return [Boolean] false if the validator function returns false or
  #   raises an exception else true
  def valid?(new_value)
    @Validator.call(new_value)
  rescue
    false
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,127 @@
|
|||||||
|
require 'concurrent/utility/native_extension_loader' # load native parts first
|
||||||
|
|
||||||
|
require 'concurrent/atomic/mutex_atomic_boolean'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_method_initialize
|
||||||
|
#
|
||||||
|
# Creates a new `AtomicBoolean` with the given initial value.
|
||||||
|
#
|
||||||
|
# @param [Boolean] initial the initial value
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_method_value_get
|
||||||
|
#
|
||||||
|
# Retrieves the current `Boolean` value.
|
||||||
|
#
|
||||||
|
# @return [Boolean] the current value
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_method_value_set
|
||||||
|
#
|
||||||
|
# Explicitly sets the value.
|
||||||
|
#
|
||||||
|
# @param [Boolean] value the new value to be set
|
||||||
|
#
|
||||||
|
# @return [Boolean] the current value
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_method_true_question
|
||||||
|
#
|
||||||
|
# Is the current value `true`
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the current value is `true`, else false
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_method_false_question
|
||||||
|
#
|
||||||
|
# Is the current value `false`
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the current value is `false`, else false
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_method_make_true
|
||||||
|
#
|
||||||
|
# Explicitly sets the value to true.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if value has changed, otherwise false
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_method_make_false
|
||||||
|
#
|
||||||
|
# Explicitly sets the value to false.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if value has changed, otherwise false
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro atomic_boolean_public_api
|
||||||
|
#
|
||||||
|
# @!method initialize(initial = false)
|
||||||
|
# @!macro atomic_boolean_method_initialize
|
||||||
|
#
|
||||||
|
# @!method value
|
||||||
|
# @!macro atomic_boolean_method_value_get
|
||||||
|
#
|
||||||
|
# @!method value=(value)
|
||||||
|
# @!macro atomic_boolean_method_value_set
|
||||||
|
#
|
||||||
|
# @!method true?
|
||||||
|
# @!macro atomic_boolean_method_true_question
|
||||||
|
#
|
||||||
|
# @!method false?
|
||||||
|
# @!macro atomic_boolean_method_false_question
|
||||||
|
#
|
||||||
|
# @!method make_true
|
||||||
|
# @!macro atomic_boolean_method_make_true
|
||||||
|
#
|
||||||
|
# @!method make_false
|
||||||
|
# @!macro atomic_boolean_method_make_false
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!visibility private
# @!macro internal_implementation_note
AtomicBooleanImplementation =
  if Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
    CAtomicBoolean
  elsif Concurrent.on_jruby?
    JavaAtomicBoolean
  else
    MutexAtomicBoolean
  end
private_constant :AtomicBooleanImplementation
|
||||||
|
|
||||||
|
# @!macro atomic_boolean
|
||||||
|
#
|
||||||
|
# A boolean value that can be updated atomically. Reads and writes to an atomic
|
||||||
|
# boolean and thread-safe and guaranteed to succeed. Reads and writes may block
|
||||||
|
# briefly but no explicit locking is required.
|
||||||
|
#
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# Performance:
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# Testing with ruby 2.1.2
|
||||||
|
# Testing with Concurrent::MutexAtomicBoolean...
|
||||||
|
# 2.790000 0.000000 2.790000 ( 2.791454)
|
||||||
|
# Testing with Concurrent::CAtomicBoolean...
|
||||||
|
# 0.740000 0.000000 0.740000 ( 0.740206)
|
||||||
|
#
|
||||||
|
# Testing with jruby 1.9.3
|
||||||
|
# Testing with Concurrent::MutexAtomicBoolean...
|
||||||
|
# 5.240000 2.520000 7.760000 ( 3.683000)
|
||||||
|
# Testing with Concurrent::JavaAtomicBoolean...
|
||||||
|
# 3.340000 0.010000 3.350000 ( 0.855000)
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean
|
||||||
|
#
|
||||||
|
# @!macro atomic_boolean_public_api
|
||||||
|
class AtomicBoolean < AtomicBooleanImplementation
  # @return [String] Short string representation.
  def to_s
    # Splice the current value into the implementation's representation.
    "#{super[0..-2]} value:#{value}>"
  end

  alias inspect to_s
end
|
||||||
|
end
|
||||||
@ -0,0 +1,144 @@
|
|||||||
|
require 'concurrent/utility/native_extension_loader' # load native parts first
|
||||||
|
|
||||||
|
require 'concurrent/atomic/mutex_atomic_fixnum'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_method_initialize
|
||||||
|
#
|
||||||
|
# Creates a new `AtomicFixnum` with the given initial value.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] initial the initial value
|
||||||
|
# @raise [ArgumentError] if the initial value is not a `Fixnum`
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_method_value_get
|
||||||
|
#
|
||||||
|
# Retrieves the current `Fixnum` value.
|
||||||
|
#
|
||||||
|
# @return [Fixnum] the current value
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_method_value_set
|
||||||
|
#
|
||||||
|
# Explicitly sets the value.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] value the new value to be set
|
||||||
|
#
|
||||||
|
# @return [Fixnum] the current value
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if the new value is not a `Fixnum`
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_method_increment
|
||||||
|
#
|
||||||
|
# Increases the current value by the given amount (defaults to 1).
|
||||||
|
#
|
||||||
|
# @param [Fixnum] delta the amount by which to increase the current value
|
||||||
|
#
|
||||||
|
# @return [Fixnum] the current value after incrementation
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_method_decrement
|
||||||
|
#
|
||||||
|
# Decreases the current value by the given amount (defaults to 1).
|
||||||
|
#
|
||||||
|
# @param [Fixnum] delta the amount by which to decrease the current value
|
||||||
|
#
|
||||||
|
# @return [Fixnum] the current value after decrementation
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_method_compare_and_set
|
||||||
|
#
|
||||||
|
# Atomically sets the value to the given updated value if the current
|
||||||
|
# value == the expected value.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] expect the expected value
|
||||||
|
# @param [Fixnum] update the new value
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the value was updated else false
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_method_update
|
||||||
|
#
|
||||||
|
# Pass the current value to the given block, replacing it
|
||||||
|
# with the block's result. May retry if the value changes
|
||||||
|
# during the block's execution.
|
||||||
|
#
|
||||||
|
# @yield [Object] Calculate a new value for the atomic reference using
|
||||||
|
# given (old) value
|
||||||
|
# @yieldparam [Object] old_value the starting value of the atomic reference
|
||||||
|
#
|
||||||
|
# @return [Object] the new value
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum_public_api
|
||||||
|
#
|
||||||
|
# @!method initialize(initial = 0)
|
||||||
|
# @!macro atomic_fixnum_method_initialize
|
||||||
|
#
|
||||||
|
# @!method value
|
||||||
|
# @!macro atomic_fixnum_method_value_get
|
||||||
|
#
|
||||||
|
# @!method value=(value)
|
||||||
|
# @!macro atomic_fixnum_method_value_set
|
||||||
|
#
|
||||||
|
# @!method increment(delta = 1)
|
||||||
|
# @!macro atomic_fixnum_method_increment
|
||||||
|
#
|
||||||
|
# @!method decrement(delta = 1)
|
||||||
|
# @!macro atomic_fixnum_method_decrement
|
||||||
|
#
|
||||||
|
# @!method compare_and_set(expect, update)
|
||||||
|
# @!macro atomic_fixnum_method_compare_and_set
|
||||||
|
#
|
||||||
|
# @!method update
|
||||||
|
# @!macro atomic_fixnum_method_update
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!visibility private
# @!macro internal_implementation_note
AtomicFixnumImplementation =
  if Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
    CAtomicFixnum
  elsif Concurrent.on_jruby?
    JavaAtomicFixnum
  else
    MutexAtomicFixnum
  end
private_constant :AtomicFixnumImplementation
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum
|
||||||
|
#
|
||||||
|
# A numeric value that can be updated atomically. Reads and writes to an atomic
|
||||||
|
# fixnum and thread-safe and guaranteed to succeed. Reads and writes may block
|
||||||
|
# briefly but no explicit locking is required.
|
||||||
|
#
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# Performance:
|
||||||
|
#
|
||||||
|
# ```
|
||||||
|
# Testing with ruby 2.1.2
|
||||||
|
# Testing with Concurrent::MutexAtomicFixnum...
|
||||||
|
# 3.130000 0.000000 3.130000 ( 3.136505)
|
||||||
|
# Testing with Concurrent::CAtomicFixnum...
|
||||||
|
# 0.790000 0.000000 0.790000 ( 0.785550)
|
||||||
|
#
|
||||||
|
# Testing with jruby 1.9.3
|
||||||
|
# Testing with Concurrent::MutexAtomicFixnum...
|
||||||
|
# 5.460000 2.460000 7.920000 ( 3.715000)
|
||||||
|
# Testing with Concurrent::JavaAtomicFixnum...
|
||||||
|
# 4.520000 0.030000 4.550000 ( 1.187000)
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong
|
||||||
|
#
|
||||||
|
# @!macro atomic_fixnum_public_api
|
||||||
|
class AtomicFixnum < AtomicFixnumImplementation
  # @return [String] Short string representation.
  def to_s
    # Splice the current value into the implementation's representation.
    "#{super[0..-2]} value:#{value}>"
  end

  alias inspect to_s
end
|
||||||
|
end
|
||||||
@ -0,0 +1,167 @@
|
|||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/synchronization/object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
# An atomic reference which maintains an object reference along with a mark bit
|
||||||
|
# that can be updated atomically.
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html
|
||||||
|
# java.util.concurrent.atomic.AtomicMarkableReference
|
||||||
|
class AtomicMarkableReference < ::Concurrent::Synchronization::Object
|
||||||
|
|
||||||
|
attr_atomic(:reference)
|
||||||
|
private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference
|
||||||
|
|
||||||
|
# Construct a new markable reference holding +value+ with mark +mark+.
#
# @param [Object] value the initial reference value
# @param [Boolean] mark the initial mark
def initialize(value = nil, mark = false)
  super()
  self.reference = immutable_array(value, mark)
end
|
||||||
|
|
||||||
|
# Atomically sets the value and mark to the given updated value and
|
||||||
|
# mark given both:
|
||||||
|
# - the current value == the expected value &&
|
||||||
|
# - the current mark == the expected mark
|
||||||
|
#
|
||||||
|
# @param [Object] expected_val the expected value
|
||||||
|
# @param [Object] new_val the new value
|
||||||
|
# @param [Boolean] expected_mark the expected mark
|
||||||
|
# @param [Boolean] new_mark the new mark
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` if successful. A `false` return indicates
|
||||||
|
# that the actual value was not equal to the expected value or the
|
||||||
|
# actual mark was not equal to the expected mark
|
||||||
|
def compare_and_set(expected_val, new_val, expected_mark, new_mark)
  # Capture one consistent [value, mark] pair for the later CAS comparison.
  current = reference
  curr_val, curr_mark = current

  # The marks must match exactly.
  return false unless expected_mark == curr_mark

  if expected_val.is_a? Numeric
    # Numerics are compared by numerical value.
    return false unless expected_val == curr_val
  else
    # Everything else is compared by object identity. Theoretically this
    # could be wrong if a user monkey-patched `Object#equal?`, but they
    # should know that they are playing with fire at that point.
    return false unless expected_val.equal? curr_val
  end

  compare_and_set_reference current, immutable_array(new_val, new_mark)
end

alias compare_and_swap compare_and_set
|
||||||
|
|
||||||
|
# Gets the current reference and marked values.
#
# @return [Array] the current reference and marked values
def get
  reference
end

# Gets the current value of the reference
#
# @return [Object] the current value of the reference
def value
  val, _mark = reference
  val
end

# Gets the current marked value
#
# @return [Boolean] the current marked value
def mark
  _val, marked = reference
  marked
end

alias marked? mark
|
||||||
|
|
||||||
|
# _Unconditionally_ sets to the given value of both the reference and
|
||||||
|
# the mark.
|
||||||
|
#
|
||||||
|
# @param [Object] new_val the new value
|
||||||
|
# @param [Boolean] new_mark the new mark
|
||||||
|
#
|
||||||
|
# @return [Array] both the new value and the new mark
|
||||||
|
def set(new_val, new_mark)
  pair = immutable_array(new_val, new_mark)
  self.reference = pair
end
|
||||||
|
|
||||||
|
# Pass the current value and marked state to the given block, replacing it
|
||||||
|
# with the block's results. May retry if the value changes during the
|
||||||
|
# block's execution.
|
||||||
|
#
|
||||||
|
# @yield [Object] Calculate a new value and marked state for the atomic
|
||||||
|
# reference using given (old) value and (old) marked
|
||||||
|
# @yieldparam [Object] old_val the starting value of the atomic reference
|
||||||
|
# @yieldparam [Boolean] old_mark the starting state of marked
|
||||||
|
#
|
||||||
|
# @return [Array] the new value and new mark
|
||||||
|
def update
|
||||||
|
loop do
|
||||||
|
old_val, old_mark = reference
|
||||||
|
new_val, new_mark = yield old_val, old_mark
|
||||||
|
|
||||||
|
if compare_and_set old_val, new_val, old_mark, new_mark
|
||||||
|
return immutable_array(new_val, new_mark)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Pass the current value to the given block, replacing it
|
||||||
|
# with the block's result. Raise an exception if the update
|
||||||
|
# fails.
|
||||||
|
#
|
||||||
|
# @yield [Object] Calculate a new value and marked state for the atomic
|
||||||
|
# reference using given (old) value and (old) marked
|
||||||
|
# @yieldparam [Object] old_val the starting value of the atomic reference
|
||||||
|
# @yieldparam [Boolean] old_mark the starting state of marked
|
||||||
|
#
|
||||||
|
# @return [Array] the new value and marked state
|
||||||
|
#
|
||||||
|
# @raise [Concurrent::ConcurrentUpdateError] if the update fails
|
||||||
|
def try_update!
|
||||||
|
old_val, old_mark = reference
|
||||||
|
new_val, new_mark = yield old_val, old_mark
|
||||||
|
|
||||||
|
unless compare_and_set old_val, new_val, old_mark, new_mark
|
||||||
|
fail ::Concurrent::ConcurrentUpdateError,
|
||||||
|
'AtomicMarkableReference: Update failed due to race condition.',
|
||||||
|
'Note: If you would like to guarantee an update, please use ' +
|
||||||
|
'the `AtomicMarkableReference#update` method.'
|
||||||
|
end
|
||||||
|
|
||||||
|
immutable_array(new_val, new_mark)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Pass the current value to the given block, replacing it with the
|
||||||
|
# block's result. Simply return nil if update fails.
|
||||||
|
#
|
||||||
|
# @yield [Object] Calculate a new value and marked state for the atomic
|
||||||
|
# reference using given (old) value and (old) marked
|
||||||
|
# @yieldparam [Object] old_val the starting value of the atomic reference
|
||||||
|
# @yieldparam [Boolean] old_mark the starting state of marked
|
||||||
|
#
|
||||||
|
# @return [Array] the new value and marked state, or nil if
|
||||||
|
# the update failed
|
||||||
|
def try_update
|
||||||
|
old_val, old_mark = reference
|
||||||
|
new_val, new_mark = yield old_val, old_mark
|
||||||
|
|
||||||
|
return unless compare_and_set old_val, new_val, old_mark, new_mark
|
||||||
|
|
||||||
|
immutable_array(new_val, new_mark)
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def immutable_array(*args)
|
||||||
|
args.freeze
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,135 @@
|
|||||||
|
require 'concurrent/utility/native_extension_loader' # load native parts first
|
||||||
|
|
||||||
|
require 'concurrent/atomic_reference/atomic_direct_update'
|
||||||
|
require 'concurrent/atomic_reference/numeric_cas_wrapper'
|
||||||
|
require 'concurrent/atomic_reference/mutex_atomic'
|
||||||
|
|
||||||
|
# Shim for TruffleRuby::AtomicReference
|
||||||
|
if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference)
|
||||||
|
# @!visibility private
|
||||||
|
module TruffleRuby
|
||||||
|
AtomicReference = Truffle::AtomicReference
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
AtomicReferenceImplementation = case
|
||||||
|
when Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class CAtomicReference
|
||||||
|
include AtomicDirectUpdate
|
||||||
|
include AtomicNumericCompareAndSetWrapper
|
||||||
|
alias_method :compare_and_swap, :compare_and_set
|
||||||
|
end
|
||||||
|
CAtomicReference
|
||||||
|
when Concurrent.on_jruby?
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class JavaAtomicReference
|
||||||
|
include AtomicDirectUpdate
|
||||||
|
end
|
||||||
|
JavaAtomicReference
|
||||||
|
when Concurrent.on_truffleruby?
|
||||||
|
class TruffleRubyAtomicReference < TruffleRuby::AtomicReference
|
||||||
|
include AtomicDirectUpdate
|
||||||
|
alias_method :value, :get
|
||||||
|
alias_method :value=, :set
|
||||||
|
alias_method :compare_and_swap, :compare_and_set
|
||||||
|
alias_method :swap, :get_and_set
|
||||||
|
end
|
||||||
|
TruffleRubyAtomicReference
|
||||||
|
else
|
||||||
|
MutexAtomicReference
|
||||||
|
end
|
||||||
|
private_constant :AtomicReferenceImplementation
|
||||||
|
|
||||||
|
# An object reference that may be updated atomically. All read and write
|
||||||
|
# operations have java volatile semantic.
|
||||||
|
#
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html
|
||||||
|
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html
|
||||||
|
#
|
||||||
|
# @!method initialize(value = nil)
|
||||||
|
# @!macro atomic_reference_method_initialize
|
||||||
|
# @param [Object] value The initial value.
|
||||||
|
#
|
||||||
|
# @!method get
|
||||||
|
# @!macro atomic_reference_method_get
|
||||||
|
# Gets the current value.
|
||||||
|
# @return [Object] the current value
|
||||||
|
#
|
||||||
|
# @!method set(new_value)
|
||||||
|
# @!macro atomic_reference_method_set
|
||||||
|
# Sets to the given value.
|
||||||
|
# @param [Object] new_value the new value
|
||||||
|
# @return [Object] the new value
|
||||||
|
#
|
||||||
|
# @!method get_and_set(new_value)
|
||||||
|
# @!macro atomic_reference_method_get_and_set
|
||||||
|
# Atomically sets to the given value and returns the old value.
|
||||||
|
# @param [Object] new_value the new value
|
||||||
|
# @return [Object] the old value
|
||||||
|
#
|
||||||
|
# @!method compare_and_set(old_value, new_value)
|
||||||
|
# @!macro atomic_reference_method_compare_and_set
|
||||||
|
#
|
||||||
|
# Atomically sets the value to the given updated value if
|
||||||
|
# the current value == the expected value.
|
||||||
|
#
|
||||||
|
# @param [Object] old_value the expected value
|
||||||
|
# @param [Object] new_value the new value
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` if successful. A `false` return indicates
|
||||||
|
# that the actual value was not equal to the expected value.
|
||||||
|
#
|
||||||
|
# @!method update
|
||||||
|
# Pass the current value to the given block, replacing it
|
||||||
|
# with the block's result. May retry if the value changes
|
||||||
|
# during the block's execution.
|
||||||
|
#
|
||||||
|
# @yield [Object] Calculate a new value for the atomic reference using
|
||||||
|
# given (old) value
|
||||||
|
# @yieldparam [Object] old_value the starting value of the atomic reference
|
||||||
|
# @return [Object] the new value
|
||||||
|
#
|
||||||
|
# @!method try_update
|
||||||
|
# Pass the current value to the given block, replacing it
|
||||||
|
# with the block's result. Return nil if the update fails.
|
||||||
|
#
|
||||||
|
# @yield [Object] Calculate a new value for the atomic reference using
|
||||||
|
# given (old) value
|
||||||
|
# @yieldparam [Object] old_value the starting value of the atomic reference
|
||||||
|
# @note This method was altered to avoid raising an exception by default.
|
||||||
|
# Instead, this method now returns `nil` in case of failure. For more info,
|
||||||
|
# please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
|
||||||
|
# @return [Object] the new value, or nil if update failed
|
||||||
|
#
|
||||||
|
# @!method try_update!
|
||||||
|
# Pass the current value to the given block, replacing it
|
||||||
|
# with the block's result. Raise an exception if the update
|
||||||
|
# fails.
|
||||||
|
#
|
||||||
|
# @yield [Object] Calculate a new value for the atomic reference using
|
||||||
|
# given (old) value
|
||||||
|
# @yieldparam [Object] old_value the starting value of the atomic reference
|
||||||
|
# @note This behavior mimics the behavior of the original
|
||||||
|
# `AtomicReference#try_update` API. The reason this was changed was to
|
||||||
|
# avoid raising exceptions (which are inherently slow) by default. For more
|
||||||
|
# info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
|
||||||
|
# @return [Object] the new value
|
||||||
|
# @raise [Concurrent::ConcurrentUpdateError] if the update fails
|
||||||
|
class AtomicReference < AtomicReferenceImplementation
|
||||||
|
|
||||||
|
# @return [String] Short string representation.
|
||||||
|
def to_s
|
||||||
|
format '%s value:%s>', super[0..-2], get
|
||||||
|
end
|
||||||
|
|
||||||
|
alias_method :inspect, :to_s
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,100 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/atomic/mutex_count_down_latch'
|
||||||
|
require 'concurrent/atomic/java_count_down_latch'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_initialize
|
||||||
|
#
|
||||||
|
# Create a new `CountDownLatch` with the initial `count`.
|
||||||
|
#
|
||||||
|
# @param [new] count the initial count
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if `count` is not an integer or is less than zero
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_wait
|
||||||
|
#
|
||||||
|
# Block on the latch until the counter reaches zero or until `timeout` is reached.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] timeout the number of seconds to wait for the counter or `nil`
|
||||||
|
# to block indefinitely
|
||||||
|
# @return [Boolean] `true` if the `count` reaches zero else false on `timeout`
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_count_down
|
||||||
|
#
|
||||||
|
# Signal the latch to decrement the counter. Will signal all blocked threads when
|
||||||
|
# the `count` reaches zero.
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_count
|
||||||
|
#
|
||||||
|
# The current value of the counter.
|
||||||
|
#
|
||||||
|
# @return [Fixnum] the current value of the counter
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro count_down_latch_public_api
|
||||||
|
#
|
||||||
|
# @!method initialize(count = 1)
|
||||||
|
# @!macro count_down_latch_method_initialize
|
||||||
|
#
|
||||||
|
# @!method wait(timeout = nil)
|
||||||
|
# @!macro count_down_latch_method_wait
|
||||||
|
#
|
||||||
|
# @!method count_down
|
||||||
|
# @!macro count_down_latch_method_count_down
|
||||||
|
#
|
||||||
|
# @!method count
|
||||||
|
# @!macro count_down_latch_method_count
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
CountDownLatchImplementation = case
|
||||||
|
when Concurrent.on_jruby?
|
||||||
|
JavaCountDownLatch
|
||||||
|
else
|
||||||
|
MutexCountDownLatch
|
||||||
|
end
|
||||||
|
private_constant :CountDownLatchImplementation
|
||||||
|
|
||||||
|
# @!macro count_down_latch
|
||||||
|
#
|
||||||
|
# A synchronization object that allows one thread to wait on multiple other threads.
|
||||||
|
# The thread that will wait creates a `CountDownLatch` and sets the initial value
|
||||||
|
# (normally equal to the number of other threads). The initiating thread passes the
|
||||||
|
# latch to the other threads then waits for the other threads by calling the `#wait`
|
||||||
|
# method. Each of the other threads calls `#count_down` when done with its work.
|
||||||
|
# When the latch counter reaches zero the waiting thread is unblocked and continues
|
||||||
|
# with its work. A `CountDownLatch` can be used only once. Its value cannot be reset.
|
||||||
|
#
|
||||||
|
# @!macro count_down_latch_public_api
|
||||||
|
# @example Waiter and Decrementer
|
||||||
|
# latch = Concurrent::CountDownLatch.new(3)
|
||||||
|
#
|
||||||
|
# waiter = Thread.new do
|
||||||
|
# latch.wait()
|
||||||
|
# puts ("Waiter released")
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# decrementer = Thread.new do
|
||||||
|
# sleep(1)
|
||||||
|
# latch.count_down
|
||||||
|
# puts latch.count
|
||||||
|
#
|
||||||
|
# sleep(1)
|
||||||
|
# latch.count_down
|
||||||
|
# puts latch.count
|
||||||
|
#
|
||||||
|
# sleep(1)
|
||||||
|
# latch.count_down
|
||||||
|
# puts latch.count
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# [waiter, decrementer].each(&:join)
|
||||||
|
class CountDownLatch < CountDownLatchImplementation
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,128 @@
|
|||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
require 'concurrent/utility/native_integer'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A synchronization aid that allows a set of threads to all wait for each
|
||||||
|
# other to reach a common barrier point.
|
||||||
|
# @example
|
||||||
|
# barrier = Concurrent::CyclicBarrier.new(3)
|
||||||
|
# jobs = Array.new(3) { |i| -> { sleep i; p done: i } }
|
||||||
|
# process = -> (i) do
|
||||||
|
# # waiting to start at the same time
|
||||||
|
# barrier.wait
|
||||||
|
# # execute job
|
||||||
|
# jobs[i].call
|
||||||
|
# # wait for others to finish
|
||||||
|
# barrier.wait
|
||||||
|
# end
|
||||||
|
# threads = 2.times.map do |i|
|
||||||
|
# Thread.new(i, &process)
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# # use main as well
|
||||||
|
# process.call 2
|
||||||
|
#
|
||||||
|
# # here we can be sure that all jobs are processed
|
||||||
|
class CyclicBarrier < Synchronization::LockableObject
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
Generation = Struct.new(:status)
|
||||||
|
private_constant :Generation
|
||||||
|
|
||||||
|
# Create a new `CyclicBarrier` that waits for `parties` threads
|
||||||
|
#
|
||||||
|
# @param [Fixnum] parties the number of parties
|
||||||
|
# @yield an optional block that will be executed that will be executed after
|
||||||
|
# the last thread arrives and before the others are released
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if `parties` is not an integer or is less than zero
|
||||||
|
def initialize(parties, &block)
|
||||||
|
Utility::NativeInteger.ensure_integer_and_bounds parties
|
||||||
|
Utility::NativeInteger.ensure_positive_and_no_zero parties
|
||||||
|
|
||||||
|
super(&nil)
|
||||||
|
synchronize { ns_initialize parties, &block }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @return [Fixnum] the number of threads needed to pass the barrier
|
||||||
|
def parties
|
||||||
|
synchronize { @parties }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @return [Fixnum] the number of threads currently waiting on the barrier
|
||||||
|
def number_waiting
|
||||||
|
synchronize { @number_waiting }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Blocks on the barrier until the number of waiting threads is equal to
|
||||||
|
# `parties` or until `timeout` is reached or `reset` is called
|
||||||
|
# If a block has been passed to the constructor, it will be executed once by
|
||||||
|
# the last arrived thread before releasing the others
|
||||||
|
# @param [Fixnum] timeout the number of seconds to wait for the counter or
|
||||||
|
# `nil` to block indefinitely
|
||||||
|
# @return [Boolean] `true` if the `count` reaches zero else false on
|
||||||
|
# `timeout` or on `reset` or if the barrier is broken
|
||||||
|
def wait(timeout = nil)
|
||||||
|
synchronize do
|
||||||
|
|
||||||
|
return false unless @generation.status == :waiting
|
||||||
|
|
||||||
|
@number_waiting += 1
|
||||||
|
|
||||||
|
if @number_waiting == @parties
|
||||||
|
@action.call if @action
|
||||||
|
ns_generation_done @generation, :fulfilled
|
||||||
|
true
|
||||||
|
else
|
||||||
|
generation = @generation
|
||||||
|
if ns_wait_until(timeout) { generation.status != :waiting }
|
||||||
|
generation.status == :fulfilled
|
||||||
|
else
|
||||||
|
ns_generation_done generation, :broken, false
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# resets the barrier to its initial state
|
||||||
|
# If there is at least one waiting thread, it will be woken up, the `wait`
|
||||||
|
# method will return false and the barrier will be broken
|
||||||
|
# If the barrier is broken, this method restores it to the original state
|
||||||
|
#
|
||||||
|
# @return [nil]
|
||||||
|
def reset
|
||||||
|
synchronize { ns_generation_done @generation, :reset }
|
||||||
|
end
|
||||||
|
|
||||||
|
# A barrier can be broken when:
|
||||||
|
# - a thread called the `reset` method while at least one other thread was waiting
|
||||||
|
# - at least one thread timed out on `wait` method
|
||||||
|
#
|
||||||
|
# A broken barrier can be restored using `reset` it's safer to create a new one
|
||||||
|
# @return [Boolean] true if the barrier is broken otherwise false
|
||||||
|
def broken?
|
||||||
|
synchronize { @generation.status != :waiting }
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
def ns_generation_done(generation, status, continue = true)
|
||||||
|
generation.status = status
|
||||||
|
ns_next_generation if continue
|
||||||
|
ns_broadcast
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_next_generation
|
||||||
|
@generation = Generation.new(:waiting)
|
||||||
|
@number_waiting = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_initialize(parties, &block)
|
||||||
|
@parties = parties
|
||||||
|
@action = block
|
||||||
|
ns_next_generation
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,109 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Old school kernel-style event reminiscent of Win32 programming in C++.
|
||||||
|
#
|
||||||
|
# When an `Event` is created it is in the `unset` state. Threads can choose to
|
||||||
|
# `#wait` on the event, blocking until released by another thread. When one
|
||||||
|
# thread wants to alert all blocking threads it calls the `#set` method which
|
||||||
|
# will then wake up all listeners. Once an `Event` has been set it remains set.
|
||||||
|
# New threads calling `#wait` will return immediately. An `Event` may be
|
||||||
|
# `#reset` at any time once it has been set.
|
||||||
|
#
|
||||||
|
# @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx
|
||||||
|
# @example
|
||||||
|
# event = Concurrent::Event.new
|
||||||
|
#
|
||||||
|
# t1 = Thread.new do
|
||||||
|
# puts "t1 is waiting"
|
||||||
|
# event.wait(1)
|
||||||
|
# puts "event occurred"
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# t2 = Thread.new do
|
||||||
|
# puts "t2 calling set"
|
||||||
|
# event.set
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# [t1, t2].each(&:join)
|
||||||
|
#
|
||||||
|
# # prints:
|
||||||
|
# # t1 is waiting
|
||||||
|
# # t2 calling set
|
||||||
|
# # event occurred
|
||||||
|
class Event < Synchronization::LockableObject
|
||||||
|
|
||||||
|
# Creates a new `Event` in the unset state. Threads calling `#wait` on the
|
||||||
|
# `Event` will block.
|
||||||
|
def initialize
|
||||||
|
super
|
||||||
|
synchronize { ns_initialize }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Is the object in the set state?
|
||||||
|
#
|
||||||
|
# @return [Boolean] indicating whether or not the `Event` has been set
|
||||||
|
def set?
|
||||||
|
synchronize { @set }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Trigger the event, setting the state to `set` and releasing all threads
|
||||||
|
# waiting on the event. Has no effect if the `Event` has already been set.
|
||||||
|
#
|
||||||
|
# @return [Boolean] should always return `true`
|
||||||
|
def set
|
||||||
|
synchronize { ns_set }
|
||||||
|
end
|
||||||
|
|
||||||
|
def try?
|
||||||
|
synchronize { @set ? false : ns_set }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Reset a previously set event back to the `unset` state.
|
||||||
|
# Has no effect if the `Event` has not yet been set.
|
||||||
|
#
|
||||||
|
# @return [Boolean] should always return `true`
|
||||||
|
def reset
|
||||||
|
synchronize do
|
||||||
|
if @set
|
||||||
|
@set = false
|
||||||
|
@iteration +=1
|
||||||
|
end
|
||||||
|
true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Wait a given number of seconds for the `Event` to be set by another
|
||||||
|
# thread. Will wait forever when no `timeout` value is given. Returns
|
||||||
|
# immediately if the `Event` has already been set.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the `Event` was set before timeout else false
|
||||||
|
def wait(timeout = nil)
|
||||||
|
synchronize do
|
||||||
|
unless @set
|
||||||
|
iteration = @iteration
|
||||||
|
ns_wait_until(timeout) { iteration < @iteration || @set }
|
||||||
|
else
|
||||||
|
true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
def ns_set
|
||||||
|
unless @set
|
||||||
|
@set = true
|
||||||
|
ns_broadcast
|
||||||
|
end
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_initialize
|
||||||
|
@set = false
|
||||||
|
@iteration = 0
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,109 @@
|
|||||||
|
require 'concurrent/constants'
|
||||||
|
require_relative 'locals'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A `FiberLocalVar` is a variable where the value is different for each fiber.
|
||||||
|
# Each variable may have a default value, but when you modify the variable only
|
||||||
|
# the current fiber will ever see that change.
|
||||||
|
#
|
||||||
|
# This is similar to Ruby's built-in fiber-local variables (`Thread.current[:name]`),
|
||||||
|
# but with these major advantages:
|
||||||
|
# * `FiberLocalVar` has its own identity, it doesn't need a Symbol.
|
||||||
|
# * Each Ruby's built-in fiber-local variable leaks some memory forever (it's a Symbol held forever on the fiber),
|
||||||
|
# so it's only OK to create a small amount of them.
|
||||||
|
# `FiberLocalVar` has no such issue and it is fine to create many of them.
|
||||||
|
# * Ruby's built-in fiber-local variables leak forever the value set on each fiber (unless set to nil explicitly).
|
||||||
|
# `FiberLocalVar` automatically removes the mapping for each fiber once the `FiberLocalVar` instance is GC'd.
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# v = FiberLocalVar.new(14)
|
||||||
|
# v.value #=> 14
|
||||||
|
# v.value = 2
|
||||||
|
# v.value #=> 2
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# v = FiberLocalVar.new(14)
|
||||||
|
#
|
||||||
|
# Fiber.new do
|
||||||
|
# v.value #=> 14
|
||||||
|
# v.value = 1
|
||||||
|
# v.value #=> 1
|
||||||
|
# end.resume
|
||||||
|
#
|
||||||
|
# Fiber.new do
|
||||||
|
# v.value #=> 14
|
||||||
|
# v.value = 2
|
||||||
|
# v.value #=> 2
|
||||||
|
# end.resume
|
||||||
|
#
|
||||||
|
# v.value #=> 14
|
||||||
|
class FiberLocalVar
|
||||||
|
LOCALS = FiberLocals.new
|
||||||
|
|
||||||
|
# Creates a fiber local variable.
|
||||||
|
#
|
||||||
|
# @param [Object] default the default value when otherwise unset
|
||||||
|
# @param [Proc] default_block Optional block that gets called to obtain the
|
||||||
|
# default value for each fiber
|
||||||
|
def initialize(default = nil, &default_block)
|
||||||
|
if default && block_given?
|
||||||
|
raise ArgumentError, "Cannot use both value and block as default value"
|
||||||
|
end
|
||||||
|
|
||||||
|
if block_given?
|
||||||
|
@default_block = default_block
|
||||||
|
@default = nil
|
||||||
|
else
|
||||||
|
@default_block = nil
|
||||||
|
@default = default
|
||||||
|
end
|
||||||
|
|
||||||
|
@index = LOCALS.next_index(self)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Returns the value in the current fiber's copy of this fiber-local variable.
|
||||||
|
#
|
||||||
|
# @return [Object] the current value
|
||||||
|
def value
|
||||||
|
LOCALS.fetch(@index) { default }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Sets the current fiber's copy of this fiber-local variable to the specified value.
|
||||||
|
#
|
||||||
|
# @param [Object] value the value to set
|
||||||
|
# @return [Object] the new value
|
||||||
|
def value=(value)
|
||||||
|
LOCALS.set(@index, value)
|
||||||
|
end
|
||||||
|
|
||||||
|
# Bind the given value to fiber local storage during
|
||||||
|
# execution of the given block.
|
||||||
|
#
|
||||||
|
# @param [Object] value the value to bind
|
||||||
|
# @yield the operation to be performed with the bound variable
|
||||||
|
# @return [Object] the value
|
||||||
|
def bind(value)
|
||||||
|
if block_given?
|
||||||
|
old_value = self.value
|
||||||
|
self.value = value
|
||||||
|
begin
|
||||||
|
yield
|
||||||
|
ensure
|
||||||
|
self.value = old_value
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def default
|
||||||
|
if @default_block
|
||||||
|
self.value = @default_block.call
|
||||||
|
else
|
||||||
|
@default
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,43 @@
|
|||||||
|
if Concurrent.on_jruby?
|
||||||
|
require 'concurrent/utility/native_extension_loader'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro count_down_latch
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class JavaCountDownLatch
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_initialize
|
||||||
|
def initialize(count = 1)
|
||||||
|
Utility::NativeInteger.ensure_integer_and_bounds(count)
|
||||||
|
Utility::NativeInteger.ensure_positive(count)
|
||||||
|
@latch = java.util.concurrent.CountDownLatch.new(count)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_wait
|
||||||
|
def wait(timeout = nil)
|
||||||
|
result = nil
|
||||||
|
if timeout.nil?
|
||||||
|
Synchronization::JRuby.sleep_interruptibly { @latch.await }
|
||||||
|
result = true
|
||||||
|
else
|
||||||
|
Synchronization::JRuby.sleep_interruptibly do
|
||||||
|
result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
result
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_count_down
|
||||||
|
def count_down
|
||||||
|
@latch.countDown
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro count_down_latch_method_count
|
||||||
|
def count
|
||||||
|
@latch.getCount
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,189 @@
|
|||||||
|
require 'fiber'
|
||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/constants'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
#
|
||||||
|
# An abstract implementation of local storage, with sub-classes for
|
||||||
|
# per-thread and per-fiber locals.
|
||||||
|
#
|
||||||
|
# Each execution context (EC, thread or fiber) has a lazily initialized array
|
||||||
|
# of local variable values. Each time a new local variable is created, we
|
||||||
|
# allocate an "index" for it.
|
||||||
|
#
|
||||||
|
# For example, if the allocated index is 1, that means slot #1 in EVERY EC's
|
||||||
|
# locals array will be used for the value of that variable.
|
||||||
|
#
|
||||||
|
# The good thing about using a per-EC structure to hold values, rather than
|
||||||
|
# a global, is that no synchronization is needed when reading and writing
|
||||||
|
# those values (since the structure is only ever accessed by a single
|
||||||
|
# thread).
|
||||||
|
#
|
||||||
|
# Of course, when a local variable is GC'd, 1) we need to recover its index
|
||||||
|
# for use by other new local variables (otherwise the locals arrays could
|
||||||
|
# get bigger and bigger with time), and 2) we need to null out all the
|
||||||
|
# references held in the now-unused slots (both to avoid blocking GC of those
|
||||||
|
# objects, and also to prevent "stale" values from being passed on to a new
|
||||||
|
# local when the index is reused).
|
||||||
|
#
|
||||||
|
# Because we need to null out freed slots, we need to keep references to
|
||||||
|
# ALL the locals arrays, so we can null out the appropriate slots in all of
|
||||||
|
# them. This is why we need to use a finalizer to clean up the locals array
|
||||||
|
# when the EC goes out of scope.
|
||||||
|
class AbstractLocals
|
||||||
|
def initialize
|
||||||
|
@free = []
|
||||||
|
@lock = Mutex.new
|
||||||
|
@all_arrays = {}
|
||||||
|
@next = 0
|
||||||
|
end
|
||||||
|
|
||||||
|
def synchronize
|
||||||
|
@lock.synchronize { yield }
|
||||||
|
end
|
||||||
|
|
||||||
|
if Concurrent.on_cruby?
|
||||||
|
def weak_synchronize
|
||||||
|
yield
|
||||||
|
end
|
||||||
|
else
|
||||||
|
alias_method :weak_synchronize, :synchronize
|
||||||
|
end
|
||||||
|
|
||||||
|
def next_index(local)
|
||||||
|
index = synchronize do
|
||||||
|
if @free.empty?
|
||||||
|
@next += 1
|
||||||
|
else
|
||||||
|
@free.pop
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# When the local goes out of scope, we should free the associated index
|
||||||
|
# and all values stored into it.
|
||||||
|
ObjectSpace.define_finalizer(local, local_finalizer(index))
|
||||||
|
|
||||||
|
index
|
||||||
|
end
|
||||||
|
|
||||||
|
def free_index(index)
|
||||||
|
weak_synchronize do
|
||||||
|
# The cost of GC'ing a TLV is linear in the number of ECs using local
|
||||||
|
# variables. But that is natural! More ECs means more storage is used
|
||||||
|
# per local variable. So naturally more CPU time is required to free
|
||||||
|
# more storage.
|
||||||
|
#
|
||||||
|
# DO NOT use each_value which might conflict with new pair assignment
|
||||||
|
# into the hash in #set method.
|
||||||
|
@all_arrays.values.each do |locals|
|
||||||
|
locals[index] = nil
|
||||||
|
end
|
||||||
|
|
||||||
|
# free index has to be published after the arrays are cleared:
|
||||||
|
@free << index
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def fetch(index)
|
||||||
|
locals = self.locals
|
||||||
|
value = locals ? locals[index] : nil
|
||||||
|
|
||||||
|
if nil == value
|
||||||
|
yield
|
||||||
|
elsif NULL.equal?(value)
|
||||||
|
nil
|
||||||
|
else
|
||||||
|
value
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def set(index, value)
|
||||||
|
locals = self.locals!
|
||||||
|
locals[index] = (nil == value ? NULL : value)
|
||||||
|
|
||||||
|
value
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# When the local goes out of scope, clean up that slot across all locals currently assigned.
|
||||||
|
def local_finalizer(index)
|
||||||
|
proc do
|
||||||
|
free_index(index)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# When a thread/fiber goes out of scope, remove the array from @all_arrays.
|
||||||
|
def thread_fiber_finalizer(array_object_id)
|
||||||
|
proc do
|
||||||
|
weak_synchronize do
|
||||||
|
@all_arrays.delete(array_object_id)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Returns the locals for the current scope, or nil if none exist.
|
||||||
|
def locals
|
||||||
|
raise NotImplementedError
|
||||||
|
end
|
||||||
|
|
||||||
|
# Returns the locals for the current scope, creating them if necessary.
|
||||||
|
def locals!
|
||||||
|
raise NotImplementedError
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
# An array-backed storage of indexed variables per thread.
|
||||||
|
class ThreadLocals < AbstractLocals
  # The current thread's backing array, or nil if it has none yet.
  def locals
    Thread.current.thread_variable_get(:concurrent_thread_locals)
  end

  # The current thread's backing array, created and registered on first use.
  def locals!
    thread = Thread.current
    existing = thread.thread_variable_get(:concurrent_thread_locals)
    return existing if existing

    created = thread.thread_variable_set(:concurrent_thread_locals, [])
    weak_synchronize { @all_arrays[created.object_id] = created }
    # When the thread goes out of scope, delete the associated locals:
    ObjectSpace.define_finalizer(thread, thread_fiber_finalizer(created.object_id))
    created
  end
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
# An array-backed storage of indexed variables per fiber.
|
||||||
|
class FiberLocals < AbstractLocals
  # The current fiber's backing array, or nil if it has none yet.
  def locals
    Thread.current[:concurrent_fiber_locals]
  end

  # The current fiber's backing array, created and registered on first use.
  def locals!
    thread = Thread.current
    existing = thread[:concurrent_fiber_locals]
    return existing if existing

    created = (thread[:concurrent_fiber_locals] = [])
    weak_synchronize { @all_arrays[created.object_id] = created }
    # When the fiber goes out of scope, delete the associated locals:
    ObjectSpace.define_finalizer(Fiber.current, thread_fiber_finalizer(created.object_id))
    created
  end
end
|
||||||
|
|
||||||
|
private_constant :AbstractLocals, :ThreadLocals, :FiberLocals
|
||||||
|
end
|
||||||
@ -0,0 +1,28 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require_relative 'fiber_local_var'
|
||||||
|
require_relative 'thread_local_var'
|
||||||
|
|
||||||
|
module Concurrent
  # @!visibility private
  # Probes whether this Ruby implementation tracks Mutex ownership per
  # thread or per fiber: locks a mutex, then asks a child fiber of the
  # same thread whether it still owns it.
  #
  # @return [Boolean] true when ownership is per thread
  def self.mutex_owned_per_thread?
    # JRuby and TruffleRuby are treated as per-fiber without probing.
    return false if Concurrent.on_jruby? || Concurrent.on_truffleruby?

    mutex = Mutex.new
    # Lock the mutex:
    mutex.synchronize do
      # Check if the mutex is still owned in a child fiber:
      Fiber.new { mutex.owned? }.resume
    end
  end

  # Choose the lock-local storage granularity once, at load time,
  # to match how this runtime scopes Mutex ownership.
  if mutex_owned_per_thread?
    LockLocalVar = ThreadLocalVar
  else
    LockLocalVar = FiberLocalVar
  end

  # Either {FiberLocalVar} or {ThreadLocalVar} depending on whether Mutex (and Monitor)
  # are held, respectively, per Fiber or per Thread.
  class LockLocalVar
  end
end
|
||||||
@ -0,0 +1,68 @@
|
|||||||
|
require 'concurrent/synchronization/safe_initialization'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro atomic_boolean
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class MutexAtomicBoolean
  extend Concurrent::Synchronization::SafeInitialization

  # @!macro atomic_boolean_method_initialize
  # @param [Object] initial the starting value, coerced to true/false
  def initialize(initial = false)
    super()
    @Lock = ::Mutex.new
    @value = !!initial
  end

  # @!macro atomic_boolean_method_value_get
  def value
    synchronize { @value }
  end

  # @!macro atomic_boolean_method_value_set
  def value=(value)
    synchronize { @value = !!value }
  end

  # @!macro atomic_boolean_method_true_question
  def true?
    synchronize { @value }
  end

  # @!macro atomic_boolean_method_false_question
  def false?
    synchronize { !@value }
  end

  # @!macro atomic_boolean_method_make_true
  def make_true
    synchronize { ns_make_value(true) }
  end

  # @!macro atomic_boolean_method_make_false
  def make_false
    synchronize { ns_make_value(false) }
  end

  protected

  # @!visibility private
  # Runs the block under @Lock; re-entrant when the lock is already held
  # by the current owner.
  def synchronize
    return yield if @Lock.owned?

    @Lock.synchronize { yield }
  end

  private

  # @!visibility private
  # Sets @value and reports whether it actually changed.
  def ns_make_value(value)
    changed = @value != value
    @value = value
    changed
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,81 @@
|
|||||||
|
require 'concurrent/synchronization/safe_initialization'
|
||||||
|
require 'concurrent/utility/native_integer'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro atomic_fixnum
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class MutexAtomicFixnum
  extend Concurrent::Synchronization::SafeInitialization

  # @!macro atomic_fixnum_method_initialize
  # @param [Integer] initial the starting value (bounds-checked)
  def initialize(initial = 0)
    super()
    @Lock = ::Mutex.new
    ns_set(initial)
  end

  # @!macro atomic_fixnum_method_value_get
  def value
    synchronize { @value }
  end

  # @!macro atomic_fixnum_method_value_set
  def value=(value)
    synchronize { ns_set(value) }
  end

  # @!macro atomic_fixnum_method_increment
  def increment(delta = 1)
    synchronize { ns_set(@value + delta.to_i) }
  end

  alias_method :up, :increment

  # @!macro atomic_fixnum_method_decrement
  def decrement(delta = 1)
    synchronize { ns_set(@value - delta.to_i) }
  end

  alias_method :down, :decrement

  # @!macro atomic_fixnum_method_compare_and_set
  # Atomically replaces the value with +update+ iff it currently equals
  # +expect+; returns whether the swap happened.
  def compare_and_set(expect, update)
    synchronize do
      matched = @value == expect.to_i
      @value = update.to_i if matched
      matched
    end
  end

  # @!macro atomic_fixnum_method_update
  # Atomically transforms the value with the given block and returns the
  # new value.
  def update
    synchronize { @value = yield(@value) }
  end

  protected

  # @!visibility private
  # Runs the block under @Lock; re-entrant when the lock is already held
  # by the current owner.
  def synchronize
    return yield if @Lock.owned?

    @Lock.synchronize { yield }
  end

  private

  # @!visibility private
  # Bounds-checks and stores the new value.
  def ns_set(value)
    Utility::NativeInteger.ensure_integer_and_bounds value
    @value = value
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,44 @@
|
|||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
require 'concurrent/utility/native_integer'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro count_down_latch
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class MutexCountDownLatch < Synchronization::LockableObject

  # @!macro count_down_latch_method_initialize
  # @param [Integer] count how many times {#count_down} must be called
  #   before waiters are released
  def initialize(count = 1)
    Utility::NativeInteger.ensure_integer_and_bounds count
    Utility::NativeInteger.ensure_positive count

    super()
    synchronize { ns_initialize count }
  end

  # @!macro count_down_latch_method_wait
  # Blocks the caller until the counter reaches zero or +timeout+ elapses.
  def wait(timeout = nil)
    synchronize { ns_wait_until(timeout) { @count.zero? } }
  end

  # @!macro count_down_latch_method_count_down
  # Decrements the counter (never below zero); wakes all waiters once it
  # reaches zero.
  def count_down
    synchronize do
      @count -= 1 if @count.positive?
      ns_broadcast if @count.zero?
    end
  end

  # @!macro count_down_latch_method_count
  # Current value of the counter.
  def count
    synchronize { @count }
  end

  protected

  # Stores the initial counter value; called under the lock from #initialize.
  def ns_initialize(count)
    @count = count
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,131 @@
|
|||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
require 'concurrent/utility/native_integer'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro semaphore
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class MutexSemaphore < Synchronization::LockableObject

  # @!macro semaphore_method_initialize
  # @param [Integer] count the initial number of permits (may be negative)
  def initialize(count)
    Utility::NativeInteger.ensure_integer_and_bounds count

    super()
    synchronize { ns_initialize count }
  end

  # @!macro semaphore_method_acquire
  # Blocks until the requested permits are available; with a block, runs
  # it and releases the permits afterwards.
  def acquire(permits = 1)
    Utility::NativeInteger.ensure_integer_and_bounds permits
    Utility::NativeInteger.ensure_positive permits

    synchronize do
      # nil timeout => wait indefinitely until the permits can be taken.
      try_acquire_timed(permits, nil)
    end

    return unless block_given?

    begin
      yield
    ensure
      # Released even if the block raises.
      release(permits)
    end
  end

  # @!macro semaphore_method_available_permits
  def available_permits
    synchronize { @free }
  end

  # @!macro semaphore_method_drain_permits
  #
  # Acquires and returns all permits that are immediately available.
  #
  # @return [Integer]
  def drain_permits
    synchronize do
      # tap returns the pre-drain count while zeroing @free.
      @free.tap { |_| @free = 0 }
    end
  end

  # @!macro semaphore_method_try_acquire
  # Non-blocking when +timeout+ is nil; otherwise waits up to +timeout+.
  # Returns whether the permits were acquired (or the block's behavior
  # when a block is given).
  def try_acquire(permits = 1, timeout = nil)
    Utility::NativeInteger.ensure_integer_and_bounds permits
    Utility::NativeInteger.ensure_positive permits

    acquired = synchronize do
      if timeout.nil?
        try_acquire_now(permits)
      else
        try_acquire_timed(permits, timeout)
      end
    end

    return acquired unless block_given?
    return unless acquired

    begin
      yield
    ensure
      release(permits)
    end
  end

  # @!macro semaphore_method_release
  # Returns permits to the pool and wakes one waiter per permit.
  def release(permits = 1)
    Utility::NativeInteger.ensure_integer_and_bounds permits
    Utility::NativeInteger.ensure_positive permits

    synchronize do
      @free += permits
      permits.times { ns_signal }
    end
    nil
  end

  # Shrinks the number of available permits by the indicated reduction.
  #
  # @param [Fixnum] reduction Number of permits to remove.
  #
  # @raise [ArgumentError] if `reduction` is not an integer or is negative
  #
  # @raise [ArgumentError] if `@free` - `@reduction` is less than zero
  #
  # @return [nil]
  #
  # @!visibility private
  def reduce_permits(reduction)
    Utility::NativeInteger.ensure_integer_and_bounds reduction
    Utility::NativeInteger.ensure_positive reduction

    synchronize { @free -= reduction }
    nil
  end

  protected

  # @!visibility private
  # Stores the initial permit count; called under the lock from #initialize.
  def ns_initialize(count)
    @free = count
  end

  private

  # @!visibility private
  # Takes the permits if enough are free right now; must be called under
  # the lock.
  def try_acquire_now(permits)
    if @free >= permits
      @free -= permits
      true
    else
      false
    end
  end

  # @!visibility private
  # Waits (up to +timeout+, or forever when nil) until the permits can be
  # taken; must be called under the lock.
  def try_acquire_timed(permits, timeout)
    ns_wait_until(timeout) { try_acquire_now(permits) }
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,255 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/atomic/atomic_fixnum'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/synchronization/object'
|
||||||
|
require 'concurrent/synchronization/lock'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Ruby read-write lock implementation
|
||||||
|
#
|
||||||
|
# Allows any number of concurrent readers, but only one concurrent writer
|
||||||
|
# (And if the "write" lock is taken, any readers who come along will have to wait)
|
||||||
|
#
|
||||||
|
# If readers are already active when a writer comes along, the writer will wait for
|
||||||
|
# all the readers to finish before going ahead.
|
||||||
|
# Any additional readers that come when the writer is already waiting, will also
|
||||||
|
# wait (so writers are not starved).
|
||||||
|
#
|
||||||
|
# This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`.
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# lock = Concurrent::ReadWriteLock.new
|
||||||
|
# lock.with_read_lock { data.retrieve }
|
||||||
|
# lock.with_write_lock { data.modify! }
|
||||||
|
#
|
||||||
|
# @note Do **not** try to acquire the write lock while already holding a read lock
|
||||||
|
# **or** try to acquire the write lock while you already have it.
|
||||||
|
# This will lead to deadlock
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
|
||||||
|
class ReadWriteLock < Synchronization::Object

  # @!visibility private
  WAITING_WRITER = 1 << 15

  # @!visibility private
  RUNNING_WRITER = 1 << 29

  # @!visibility private
  MAX_READERS = WAITING_WRITER - 1

  # @!visibility private
  MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1

  safe_initialization!

  # Implementation notes:
  # A goal is to make the uncontended path for both readers/writers lock-free
  # Only if there is reader-writer or writer-writer contention, should locks be used
  # Internal state is represented by a single integer ("counter"), and updated
  # using atomic compare-and-swap operations
  # When the counter is 0, the lock is free
  # Each reader increments the counter by 1 when acquiring a read lock
  # (and decrements by 1 when releasing the read lock)
  # The counter is increased by (1 << 15) for each writer waiting to acquire the
  # write lock, and by (1 << 29) if the write lock is taken

  # Create a new `ReadWriteLock` in the unlocked state.
  def initialize
    super()
    @Counter = AtomicFixnum.new(0) # single integer which represents lock state
    @ReadLock = Synchronization::Lock.new # waiting readers park here
    @WriteLock = Synchronization::Lock.new # waiting writers park here
  end

  # Execute a block operation within a read lock.
  #
  # @yield the task to be performed within the lock.
  #
  # @return [Object] the result of the block operation.
  #
  # @raise [ArgumentError] when no block is given.
  # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
  #   is exceeded.
  def with_read_lock
    raise ArgumentError.new('no block given') unless block_given?
    acquire_read_lock
    begin
      yield
    ensure
      # Released even if the block raises.
      release_read_lock
    end
  end

  # Execute a block operation within a write lock.
  #
  # @yield the task to be performed within the lock.
  #
  # @return [Object] the result of the block operation.
  #
  # @raise [ArgumentError] when no block is given.
  # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
  #   is exceeded.
  def with_write_lock
    raise ArgumentError.new('no block given') unless block_given?
    acquire_write_lock
    begin
      yield
    ensure
      # Released even if the block raises.
      release_write_lock
    end
  end

  # Acquire a read lock. If a write lock has been acquired will block until
  # it is released. Will not block if other read locks have been acquired.
  #
  # @return [Boolean] true if the lock is successfully acquired
  #
  # @raise [Concurrent::ResourceLimitError] if the maximum number of readers
  #   is exceeded.
  def acquire_read_lock
    while true
      c = @Counter.value
      raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)

      # If a writer is waiting when we first queue up, we need to wait
      if waiting_writer?(c)
        @ReadLock.wait_until { !waiting_writer? }

        # after a reader has waited once, they are allowed to "barge" ahead of waiting writers
        # but if a writer is *running*, the reader still needs to wait (naturally)
        while true
          c = @Counter.value
          if running_writer?(c)
            @ReadLock.wait_until { !running_writer? }
          else
            # NOTE(review): this path returns nil rather than true — callers
            # should not rely on the return value; verify against upstream.
            return if @Counter.compare_and_set(c, c+1)
          end
        end
      else
        # Uncontended fast path: just bump the reader count.
        break if @Counter.compare_and_set(c, c+1)
      end
    end
    true
  end

  # Release a previously acquired read lock.
  #
  # @return [Boolean] true if the lock is successfully released
  def release_read_lock
    while true
      c = @Counter.value
      if @Counter.compare_and_set(c, c-1)
        # If one or more writers were waiting, and we were the last reader, wake a writer up
        if waiting_writer?(c) && running_readers(c) == 1
          @WriteLock.signal
        end
        break
      end
    end
    true
  end

  # Acquire a write lock. Will block and wait for all active readers and writers.
  #
  # @return [Boolean] true if the lock is successfully acquired
  #
  # @raise [Concurrent::ResourceLimitError] if the maximum number of writers
  #   is exceeded.
  def acquire_write_lock
    while true
      c = @Counter.value
      raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)

      if c == 0 # no readers OR writers running
        # if we successfully swap the RUNNING_WRITER bit on, then we can go ahead
        break if @Counter.compare_and_set(0, RUNNING_WRITER)
      elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
        while true
          # Now we have successfully incremented, so no more readers will be able to increment
          # (they will wait instead)
          # However, readers OR writers could decrement right here, OR another writer could increment
          @WriteLock.wait_until do
            # So we have to do another check inside the synchronized section
            # If a writer OR reader is running, then go to sleep
            c = @Counter.value
            !running_writer?(c) && !running_readers?(c)
          end

          # We just came out of a wait
          # If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
          # Then we are OK to stop waiting and go ahead
          # Otherwise go back and wait again
          c = @Counter.value
          break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
        end
        break
      end
    end
    true
  end

  # Release a previously acquired write lock.
  #
  # @return [Boolean] true if the lock is successfully released
  def release_write_lock
    # No-op if this lock is not write-locked at all.
    return true unless running_writer?
    c = @Counter.update { |counter| counter - RUNNING_WRITER }
    # Readers get first chance to wake (prevents writer starvation of readers).
    @ReadLock.broadcast
    @WriteLock.signal if waiting_writers(c) > 0
    true
  end

  # Queries if the write lock is held by any thread.
  #
  # @return [Boolean] true if the write lock is held else false`
  def write_locked?
    @Counter.value >= RUNNING_WRITER
  end

  # Queries whether any threads are waiting to acquire the read or write lock.
  #
  # @return [Boolean] true if any threads are waiting for a lock else false
  def has_waiters?
    waiting_writer?(@Counter.value)
  end

  private

  # @!visibility private
  # Number of currently-running readers encoded in counter +c+ (low 15 bits).
  def running_readers(c = @Counter.value)
    c & MAX_READERS
  end

  # @!visibility private
  def running_readers?(c = @Counter.value)
    (c & MAX_READERS) > 0
  end

  # @!visibility private
  def running_writer?(c = @Counter.value)
    c >= RUNNING_WRITER
  end

  # @!visibility private
  # Number of writers currently waiting, decoded from the middle bit field.
  def waiting_writers(c = @Counter.value)
    (c & MAX_WRITERS) / WAITING_WRITER
  end

  # @!visibility private
  # True when at least one writer is waiting OR running (any bit >= 1 << 15).
  def waiting_writer?(c = @Counter.value)
    c >= WAITING_WRITER
  end

  # @!visibility private
  def max_readers?(c = @Counter.value)
    (c & MAX_READERS) == MAX_READERS
  end

  # @!visibility private
  def max_writers?(c = @Counter.value)
    (c & MAX_WRITERS) == MAX_WRITERS
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,379 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/atomic/atomic_reference'
|
||||||
|
require 'concurrent/atomic/atomic_fixnum'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/synchronization/object'
|
||||||
|
require 'concurrent/synchronization/lock'
|
||||||
|
require 'concurrent/atomic/lock_local_var'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Re-entrant read-write lock implementation
|
||||||
|
#
|
||||||
|
# Allows any number of concurrent readers, but only one concurrent writer
|
||||||
|
# (And while the "write" lock is taken, no read locks can be obtained either.
|
||||||
|
# Hence, the write lock can also be called an "exclusive" lock.)
|
||||||
|
#
|
||||||
|
# If another thread has taken a read lock, any thread which wants a write lock
|
||||||
|
# will block until all the readers release their locks. However, once a thread
|
||||||
|
# starts waiting to obtain a write lock, any additional readers that come along
|
||||||
|
# will also wait (so writers are not starved).
|
||||||
|
#
|
||||||
|
# A thread can acquire both a read and write lock at the same time. A thread can
|
||||||
|
# also acquire a read lock OR a write lock more than once. Only when the read (or
|
||||||
|
# write) lock is released as many times as it was acquired, will the thread
|
||||||
|
# actually let it go, allowing other threads which might have been waiting
|
||||||
|
# to proceed. Therefore the lock can be upgraded by first acquiring
|
||||||
|
# read lock and then write lock and that the lock can be downgraded by first
|
||||||
|
# having both read and write lock a releasing just the write lock.
|
||||||
|
#
|
||||||
|
# If both read and write locks are acquired by the same thread, it is not strictly
|
||||||
|
# necessary to release them in the same order they were acquired. In other words,
|
||||||
|
# the following code is legal:
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# lock = Concurrent::ReentrantReadWriteLock.new
|
||||||
|
# lock.acquire_write_lock
|
||||||
|
# lock.acquire_read_lock
|
||||||
|
# lock.release_write_lock
|
||||||
|
# # At this point, the current thread is holding only a read lock, not a write
|
||||||
|
# # lock. So other threads can take read locks, but not a write lock.
|
||||||
|
# lock.release_read_lock
|
||||||
|
# # Now the current thread is not holding either a read or write lock, so
|
||||||
|
# # another thread could potentially acquire a write lock.
|
||||||
|
#
|
||||||
|
# This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`.
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# lock = Concurrent::ReentrantReadWriteLock.new
|
||||||
|
# lock.with_read_lock { data.retrieve }
|
||||||
|
# lock.with_write_lock { data.modify! }
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
|
||||||
|
class ReentrantReadWriteLock < Synchronization::Object
|
||||||
|
|
||||||
|
# Implementation notes:
|
||||||
|
#
|
||||||
|
# A goal is to make the uncontended path for both readers/writers mutex-free
|
||||||
|
# Only if there is reader-writer or writer-writer contention, should mutexes be used
|
||||||
|
# Otherwise, a single CAS operation is all we need to acquire/release a lock
|
||||||
|
#
|
||||||
|
# Internal state is represented by a single integer ("counter"), and updated
|
||||||
|
# using atomic compare-and-swap operations
|
||||||
|
# When the counter is 0, the lock is free
|
||||||
|
# Each thread which has one OR MORE read locks increments the counter by 1
|
||||||
|
# (and decrements by 1 when releasing the read lock)
|
||||||
|
# The counter is increased by (1 << 15) for each writer waiting to acquire the
|
||||||
|
# write lock, and by (1 << 29) if the write lock is taken
|
||||||
|
#
|
||||||
|
# Additionally, each thread uses a thread-local variable to count how many times
|
||||||
|
# it has acquired a read lock, AND how many times it has acquired a write lock.
|
||||||
|
# It uses a similar trick; an increment of 1 means a read lock was taken, and
|
||||||
|
# an increment of (1 << 15) means a write lock was taken
|
||||||
|
# This is what makes re-entrancy possible
|
||||||
|
#
|
||||||
|
# 2 rules are followed to ensure good liveness properties:
|
||||||
|
# 1) Once a writer has queued up and is waiting for a write lock, no other thread
|
||||||
|
# can take a lock without waiting
|
||||||
|
# 2) When a write lock is released, readers are given the "first chance" to wake
|
||||||
|
# up and acquire a read lock
|
||||||
|
# Following these rules means readers and writers tend to "take turns", so neither
|
||||||
|
# can starve the other, even under heavy contention
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
READER_BITS = 15
|
||||||
|
# @!visibility private
|
||||||
|
WRITER_BITS = 14
|
||||||
|
|
||||||
|
# Used with @Counter:
|
||||||
|
# @!visibility private
|
||||||
|
WAITING_WRITER = 1 << READER_BITS
|
||||||
|
# @!visibility private
|
||||||
|
RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS)
|
||||||
|
# @!visibility private
|
||||||
|
MAX_READERS = WAITING_WRITER - 1
|
||||||
|
# @!visibility private
|
||||||
|
MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1
|
||||||
|
|
||||||
|
# Used with @HeldCount:
|
||||||
|
# @!visibility private
|
||||||
|
WRITE_LOCK_HELD = 1 << READER_BITS
|
||||||
|
# @!visibility private
|
||||||
|
READ_LOCK_MASK = WRITE_LOCK_HELD - 1
|
||||||
|
# @!visibility private
|
||||||
|
WRITE_LOCK_MASK = MAX_WRITERS
|
||||||
|
|
||||||
|
safe_initialization!
|
||||||
|
|
||||||
|
# Create a new `ReentrantReadWriteLock` in the unlocked state.
def initialize
  super()
  @Counter = AtomicFixnum.new(0) # single integer which represents lock state
  @ReadQueue = Synchronization::Lock.new # used to queue waiting readers
  @WriteQueue = Synchronization::Lock.new # used to queue waiting writers
  # Per-thread (or per-fiber, matching Mutex ownership) held-lock counts:
  # low bits count read locks, high bits count write locks.
  @HeldCount = LockLocalVar.new(0) # indicates # of R & W locks held by this thread
end
|
||||||
|
|
||||||
|
# Execute a block operation within a read lock.
|
||||||
|
#
|
||||||
|
# @yield the task to be performed within the lock.
|
||||||
|
#
|
||||||
|
# @return [Object] the result of the block operation.
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] when no block is given.
|
||||||
|
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
|
||||||
|
# is exceeded.
|
||||||
|
# Runs the block while holding a read lock; the lock is always released,
# even if the block raises.
def with_read_lock
  raise ArgumentError.new('no block given') unless block_given?
  acquire_read_lock
  begin
    yield
  ensure
    release_read_lock
  end
end
|
||||||
|
|
||||||
|
# Execute a block operation within a write lock.
|
||||||
|
#
|
||||||
|
# @yield the task to be performed within the lock.
|
||||||
|
#
|
||||||
|
# @return [Object] the result of the block operation.
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] when no block is given.
|
||||||
|
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
|
||||||
|
# is exceeded.
|
||||||
|
# Runs the block while holding the write (exclusive) lock; the lock is
# always released, even if the block raises.
def with_write_lock
  raise ArgumentError.new('no block given') unless block_given?
  acquire_write_lock
  begin
    yield
  ensure
    release_write_lock
  end
end
|
||||||
|
|
||||||
|
# Acquire a read lock. If a write lock is held by another thread, will block
|
||||||
|
# until it is released.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the lock is successfully acquired
|
||||||
|
#
|
||||||
|
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
|
||||||
|
# is exceeded.
|
||||||
|
def acquire_read_lock
  if (held = @HeldCount.value) > 0
    # If we already have a lock, there's no need to wait
    if held & READ_LOCK_MASK == 0
      # But we do need to update the counter, if we were holding a write
      # lock but not a read lock
      @Counter.update { |c| c + 1 }
    end
    @HeldCount.value = held + 1
    return true
  end

  while true
    c = @Counter.value
    raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)

    # If a writer is waiting OR running when we first queue up, we need to wait
    if waiting_or_running_writer?(c)
      # Before going to sleep, check again with the ReadQueue mutex held
      @ReadQueue.synchronize do
        @ReadQueue.ns_wait if waiting_or_running_writer?
      end
      # Note: the above 'synchronize' block could have used #wait_until,
      # but that waits repeatedly in a loop, checking the wait condition
      # each time it wakes up (to protect against spurious wakeups)
      # But we are already in a loop, which is only broken when we successfully
      # acquire the lock! So we don't care about spurious wakeups, and would
      # rather not pay the extra overhead of using #wait_until

      # After a reader has waited once, they are allowed to "barge" ahead of waiting writers
      # But if a writer is *running*, the reader still needs to wait (naturally)
      while true
        c = @Counter.value
        if running_writer?(c)
          @ReadQueue.synchronize do
            @ReadQueue.ns_wait if running_writer?
          end
        elsif @Counter.compare_and_set(c, c+1)
          # Successfully registered as a running reader; record it locally.
          @HeldCount.value = held + 1
          return true
        end
      end
    elsif @Counter.compare_and_set(c, c+1)
      # Uncontended fast path: one CAS and we hold the read lock.
      @HeldCount.value = held + 1
      return true
    end
  end
end
|
||||||
|
|
||||||
|
# Try to acquire a read lock and return true if we succeed. If it cannot be
|
||||||
|
# acquired immediately, return false.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the lock is successfully acquired
|
||||||
|
def try_read_lock
  if (held = @HeldCount.value) > 0
    # Re-entrant acquisition: this thread already holds a lock.
    if held & READ_LOCK_MASK == 0
      # If we hold a write lock, but not a read lock...
      @Counter.update { |c| c + 1 }
    end
    @HeldCount.value = held + 1
    return true
  else
    c = @Counter.value
    # Only take the lock immediately if no writer is waiting or running.
    if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1)
      @HeldCount.value = held + 1
      return true
    end
  end
  false
end
|
||||||
|
|
||||||
|
# Release a previously acquired read lock.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the lock is successfully released
|
||||||
|
    # Release a previously acquired read lock.
    #
    # @return [Boolean] true if the lock is successfully released
    # @raise [IllegalOperationError] if no read lock is held by this thread
    def release_read_lock
      held = @HeldCount.value = @HeldCount.value - 1
      rlocks_held = held & READ_LOCK_MASK
      if rlocks_held == 0
        # Last read hold for this thread: decrement the shared reader count too.
        c = @Counter.update { |counter| counter - 1 }
        # If one or more writers were waiting, and we were the last reader, wake a writer up
        if waiting_or_running_writer?(c) && running_readers(c) == 0
          @WriteQueue.signal
        end
      elsif rlocks_held == READ_LOCK_MASK
        # The read-lock field underflowed (wrapped to all ones): the thread
        # released a lock it never held.
        raise IllegalOperationError, "Cannot release a read lock which is not held"
      end
      true
    end
|
||||||
|
|
||||||
|
# Acquire a write lock. Will block and wait for all active readers and writers.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the lock is successfully acquired
|
||||||
|
#
|
||||||
|
# @raise [Concurrent::ResourceLimitError] if the maximum number of writers
|
||||||
|
# is exceeded.
|
||||||
|
def acquire_write_lock
|
||||||
|
if (held = @HeldCount.value) >= WRITE_LOCK_HELD
|
||||||
|
# if we already have a write (exclusive) lock, there's no need to wait
|
||||||
|
@HeldCount.value = held + WRITE_LOCK_HELD
|
||||||
|
return true
|
||||||
|
end
|
||||||
|
|
||||||
|
while true
|
||||||
|
c = @Counter.value
|
||||||
|
raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)
|
||||||
|
|
||||||
|
# To go ahead and take the lock without waiting, there must be no writer
|
||||||
|
# running right now, AND no writers who came before us still waiting to
|
||||||
|
# acquire the lock
|
||||||
|
# Additionally, if any read locks have been taken, we must hold all of them
|
||||||
|
if held > 0 && @Counter.compare_and_set(1, c+RUNNING_WRITER)
|
||||||
|
# If we are the only one reader and successfully swap the RUNNING_WRITER bit on, then we can go ahead
|
||||||
|
@HeldCount.value = held + WRITE_LOCK_HELD
|
||||||
|
return true
|
||||||
|
elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
|
||||||
|
while true
|
||||||
|
# Now we have successfully incremented, so no more readers will be able to increment
|
||||||
|
# (they will wait instead)
|
||||||
|
# However, readers OR writers could decrement right here
|
||||||
|
@WriteQueue.synchronize do
|
||||||
|
# So we have to do another check inside the synchronized section
|
||||||
|
# If a writer OR another reader is running, then go to sleep
|
||||||
|
c = @Counter.value
|
||||||
|
@WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held
|
||||||
|
end
|
||||||
|
# Note: if you are thinking of replacing the above 'synchronize' block
|
||||||
|
# with #wait_until, read the comment in #acquire_read_lock first!
|
||||||
|
|
||||||
|
# We just came out of a wait
|
||||||
|
# If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
|
||||||
|
# then we are OK to stop waiting and go ahead
|
||||||
|
# Otherwise go back and wait again
|
||||||
|
c = @Counter.value
|
||||||
|
if !running_writer?(c) &&
|
||||||
|
running_readers(c) == held &&
|
||||||
|
@Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
|
||||||
|
@HeldCount.value = held + WRITE_LOCK_HELD
|
||||||
|
return true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Try to acquire a write lock and return true if we succeed. If it cannot be
|
||||||
|
# acquired immediately, return false.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the lock is successfully acquired
|
||||||
|
    # Try to acquire a write lock and return true if we succeed. If it cannot be
    # acquired immediately, return false.
    #
    # @return [Boolean] true if the lock is successfully acquired
    def try_write_lock
      if (held = @HeldCount.value) >= WRITE_LOCK_HELD
        # Re-entrant acquisition: already the writer, just bump the hold count.
        @HeldCount.value = held + WRITE_LOCK_HELD
        return true
      else
        c = @Counter.value
        # Single CAS attempt: succeed only when no writer is waiting or running
        # AND every currently-registered reader is this thread itself.
        if !waiting_or_running_writer?(c) &&
           running_readers(c) == held &&
           @Counter.compare_and_set(c, c+RUNNING_WRITER)
          @HeldCount.value = held + WRITE_LOCK_HELD
          return true
        end
      end
      false
    end
|
||||||
|
|
||||||
|
# Release a previously acquired write lock.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the lock is successfully released
|
||||||
|
    # Release a previously acquired write lock.
    #
    # @return [Boolean] true if the lock is successfully released
    # @raise [IllegalOperationError] if no write lock is held by this thread
    def release_write_lock
      held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD
      wlocks_held = held & WRITE_LOCK_MASK
      if wlocks_held == 0
        # Outermost write hold released: clear the RUNNING_WRITER bit and wake
        # everyone who may proceed (all readers, plus one waiting writer).
        c = @Counter.update { |counter| counter - RUNNING_WRITER }
        @ReadQueue.broadcast
        @WriteQueue.signal if waiting_writers(c) > 0
      elsif wlocks_held == WRITE_LOCK_MASK
        # The write-lock field underflowed: released a lock that was not held.
        raise IllegalOperationError, "Cannot release a write lock which is not held"
      end
      true
    end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
    # Number of currently running readers, from the low bit-field of the packed
    # counter (the `>> READER_BITS` in #waiting_writers shows readers occupy
    # the lowest READER_BITS bits).
    def running_readers(c = @Counter.value)
      c & MAX_READERS
    end

    # @!visibility private
    # True when at least one reader is running.
    def running_readers?(c = @Counter.value)
      (c & MAX_READERS) > 0
    end

    # @!visibility private
    # The RUNNING_WRITER flag sits above both other fields, so a plain
    # comparison detects it without masking.
    def running_writer?(c = @Counter.value)
      c >= RUNNING_WRITER
    end

    # @!visibility private
    # Number of writers queued but not yet running (middle bit-field).
    def waiting_writers(c = @Counter.value)
      (c & MAX_WRITERS) >> READER_BITS
    end

    # @!visibility private
    # True when any writer is either waiting or running (everything at or
    # above the waiting-writer field).
    def waiting_or_running_writer?(c = @Counter.value)
      c >= WAITING_WRITER
    end

    # @!visibility private
    # Reader field saturated — used to raise ResourceLimitError.
    def max_readers?(c = @Counter.value)
      (c & MAX_READERS) == MAX_READERS
    end

    # @!visibility private
    # Writer field saturated — used to raise ResourceLimitError.
    def max_writers?(c = @Counter.value)
      (c & MAX_WRITERS) == MAX_WRITERS
    end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,163 @@
|
|||||||
|
require 'concurrent/atomic/mutex_semaphore'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro semaphore_method_initialize
|
||||||
|
#
|
||||||
|
# Create a new `Semaphore` with the initial `count`.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] count the initial count
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if `count` is not an integer
|
||||||
|
|
||||||
|
# @!macro semaphore_method_acquire
|
||||||
|
#
|
||||||
|
# Acquires the given number of permits from this semaphore,
|
||||||
|
# blocking until all are available. If a block is given,
|
||||||
|
# yields to it and releases the permits afterwards.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] permits Number of permits to acquire
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if `permits` is not an integer or is less than zero
|
||||||
|
#
|
||||||
|
# @return [nil, BasicObject] Without a block, `nil` is returned. If a block
|
||||||
|
# is given, its return value is returned.
|
||||||
|
|
||||||
|
# @!macro semaphore_method_available_permits
|
||||||
|
#
|
||||||
|
# Returns the current number of permits available in this semaphore.
|
||||||
|
#
|
||||||
|
# @return [Integer]
|
||||||
|
|
||||||
|
# @!macro semaphore_method_drain_permits
|
||||||
|
#
|
||||||
|
# Acquires and returns all permits that are immediately available.
|
||||||
|
#
|
||||||
|
# @return [Integer]
|
||||||
|
|
||||||
|
# @!macro semaphore_method_try_acquire
|
||||||
|
#
|
||||||
|
# Acquires the given number of permits from this semaphore,
|
||||||
|
# only if all are available at the time of invocation or within
|
||||||
|
# `timeout` interval. If a block is given, yields to it if the permits
|
||||||
|
# were successfully acquired, and releases them afterward, returning the
|
||||||
|
# block's return value.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] permits the number of permits to acquire
|
||||||
|
#
|
||||||
|
# @param [Fixnum] timeout the number of seconds to wait for the counter
|
||||||
|
# or `nil` to return immediately
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if `permits` is not an integer or is less than zero
|
||||||
|
#
|
||||||
|
# @return [true, false, nil, BasicObject] `false` if no permits are
|
||||||
|
# available, `true` when acquired a permit. If a block is given, the
|
||||||
|
# block's return value is returned if the permits were acquired; if not,
|
||||||
|
# `nil` is returned.
|
||||||
|
|
||||||
|
# @!macro semaphore_method_release
|
||||||
|
#
|
||||||
|
# Releases the given number of permits, returning them to the semaphore.
|
||||||
|
#
|
||||||
|
# @param [Fixnum] permits Number of permits to return to the semaphore.
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if `permits` is not a number or is less than zero
|
||||||
|
#
|
||||||
|
# @return [nil]
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro semaphore_public_api
|
||||||
|
#
|
||||||
|
# @!method initialize(count)
|
||||||
|
# @!macro semaphore_method_initialize
|
||||||
|
#
|
||||||
|
# @!method acquire(permits = 1)
|
||||||
|
# @!macro semaphore_method_acquire
|
||||||
|
#
|
||||||
|
# @!method available_permits
|
||||||
|
# @!macro semaphore_method_available_permits
|
||||||
|
#
|
||||||
|
# @!method drain_permits
|
||||||
|
# @!macro semaphore_method_drain_permits
|
||||||
|
#
|
||||||
|
# @!method try_acquire(permits = 1, timeout = nil)
|
||||||
|
# @!macro semaphore_method_try_acquire
|
||||||
|
#
|
||||||
|
# @!method release(permits = 1)
|
||||||
|
# @!macro semaphore_method_release
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
  # Select the platform-specific backing implementation once at load time:
  # the Java-backed semaphore on JRuby, otherwise the pure-Ruby mutex-based one.
  SemaphoreImplementation = if Concurrent.on_jruby?
                              require 'concurrent/utility/native_extension_loader'
                              JavaSemaphore
                            else
                              MutexSemaphore
                            end
  private_constant :SemaphoreImplementation
|
||||||
|
|
||||||
|
# @!macro semaphore
|
||||||
|
#
|
||||||
|
# A counting semaphore. Conceptually, a semaphore maintains a set of
|
||||||
|
# permits. Each {#acquire} blocks if necessary until a permit is
|
||||||
|
# available, and then takes it. Each {#release} adds a permit, potentially
|
||||||
|
# releasing a blocking acquirer.
|
||||||
|
# However, no actual permit objects are used; the Semaphore just keeps a
|
||||||
|
# count of the number available and acts accordingly.
|
||||||
|
# Alternatively, permits may be acquired within a block, and automatically
|
||||||
|
# released after the block finishes executing.
|
||||||
|
#
|
||||||
|
# @!macro semaphore_public_api
|
||||||
|
# @example
|
||||||
|
# semaphore = Concurrent::Semaphore.new(2)
|
||||||
|
#
|
||||||
|
# t1 = Thread.new do
|
||||||
|
# semaphore.acquire
|
||||||
|
# puts "Thread 1 acquired semaphore"
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# t2 = Thread.new do
|
||||||
|
# semaphore.acquire
|
||||||
|
# puts "Thread 2 acquired semaphore"
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# t3 = Thread.new do
|
||||||
|
# semaphore.acquire
|
||||||
|
# puts "Thread 3 acquired semaphore"
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# t4 = Thread.new do
|
||||||
|
# sleep(2)
|
||||||
|
# puts "Thread 4 releasing semaphore"
|
||||||
|
# semaphore.release
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# [t1, t2, t3, t4].each(&:join)
|
||||||
|
#
|
||||||
|
# # prints:
|
||||||
|
# # Thread 3 acquired semaphore
|
||||||
|
# # Thread 2 acquired semaphore
|
||||||
|
# # Thread 4 releasing semaphore
|
||||||
|
# # Thread 1 acquired semaphore
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# semaphore = Concurrent::Semaphore.new(1)
|
||||||
|
#
|
||||||
|
# puts semaphore.available_permits
|
||||||
|
# semaphore.acquire do
|
||||||
|
# puts semaphore.available_permits
|
||||||
|
# end
|
||||||
|
# puts semaphore.available_permits
|
||||||
|
#
|
||||||
|
# # prints:
|
||||||
|
# # 1
|
||||||
|
# # 0
|
||||||
|
# # 1
|
||||||
|
  # Concrete public class; all behavior is inherited from the platform-specific
  # implementation selected in SemaphoreImplementation above.
  class Semaphore < SemaphoreImplementation
  end
|
||||||
|
end
|
||||||
@ -0,0 +1,111 @@
|
|||||||
|
require 'concurrent/constants'
|
||||||
|
require_relative 'locals'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A `ThreadLocalVar` is a variable where the value is different for each thread.
|
||||||
|
# Each variable may have a default value, but when you modify the variable only
|
||||||
|
# the current thread will ever see that change.
|
||||||
|
#
|
||||||
|
# This is similar to Ruby's built-in thread-local variables (`Thread#thread_variable_get`),
|
||||||
|
# but with these major advantages:
|
||||||
|
# * `ThreadLocalVar` has its own identity, it doesn't need a Symbol.
|
||||||
|
# * Each Ruby's built-in thread-local variable leaks some memory forever (it's a Symbol held forever on the thread),
|
||||||
|
# so it's only OK to create a small amount of them.
|
||||||
|
# `ThreadLocalVar` has no such issue and it is fine to create many of them.
|
||||||
|
# * Ruby's built-in thread-local variables leak forever the value set on each thread (unless set to nil explicitly).
|
||||||
|
# `ThreadLocalVar` automatically removes the mapping for each thread once the `ThreadLocalVar` instance is GC'd.
|
||||||
|
#
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# v = ThreadLocalVar.new(14)
|
||||||
|
# v.value #=> 14
|
||||||
|
# v.value = 2
|
||||||
|
# v.value #=> 2
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# v = ThreadLocalVar.new(14)
|
||||||
|
#
|
||||||
|
# t1 = Thread.new do
|
||||||
|
# v.value #=> 14
|
||||||
|
# v.value = 1
|
||||||
|
# v.value #=> 1
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# t2 = Thread.new do
|
||||||
|
# v.value #=> 14
|
||||||
|
# v.value = 2
|
||||||
|
# v.value #=> 2
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# v.value #=> 14
|
||||||
|
  class ThreadLocalVar
    # Shared per-thread store; each ThreadLocalVar instance reserves one index
    # in it (see ThreadLocals in 'locals' — entries for a var are reclaimable
    # once the var is GC'd, per the class-level documentation above).
    LOCALS = ThreadLocals.new

    # Creates a thread local variable.
    #
    # @param [Object] default the default value when otherwise unset
    # @param [Proc] default_block Optional block that gets called to obtain the
    #   default value for each thread
    # @raise [ArgumentError] if both a default value and a default block are given
    def initialize(default = nil, &default_block)
      if default && block_given?
        raise ArgumentError, "Cannot use both value and block as default value"
      end

      if block_given?
        @default_block = default_block
        @default = nil
      else
        @default_block = nil
        @default = default
      end

      # This var's slot in the shared store, fixed for the var's lifetime.
      @index = LOCALS.next_index(self)
    end

    # Returns the value in the current thread's copy of this thread-local variable.
    #
    # @return [Object] the current value (falls back to the default when the
    #   current thread has never set this variable)
    def value
      LOCALS.fetch(@index) { default }
    end

    # Sets the current thread's copy of this thread-local variable to the specified value.
    #
    # @param [Object] value the value to set
    # @return [Object] the new value
    def value=(value)
      LOCALS.set(@index, value)
    end

    # Bind the given value to thread local storage during
    # execution of the given block.
    #
    # @param [Object] value the value to bind
    # @yield the operation to be performed with the bound variable
    # @return [Object] the block's result; the previous value is restored even
    #   if the block raises (note: returns nil when no block is given)
    def bind(value)
      if block_given?
        old_value = self.value
        self.value = value
        begin
          yield
        ensure
          self.value = old_value
        end
      end
    end

    protected

    # @!visibility private
    # Default for the current thread. When a default block was supplied, its
    # result is stored via #value= so the block runs at most once per thread.
    def default
      if @default_block
        self.value = @default_block.call
      else
        @default
      end
    end
  end
|
||||||
|
end
|
||||||
@ -0,0 +1,37 @@
|
|||||||
|
require 'concurrent/errors'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Define update methods that use direct paths
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
module AtomicDirectUpdate
|
||||||
|
def update
|
||||||
|
true until compare_and_set(old_value = get, new_value = yield(old_value))
|
||||||
|
new_value
|
||||||
|
end
|
||||||
|
|
||||||
|
def try_update
|
||||||
|
old_value = get
|
||||||
|
new_value = yield old_value
|
||||||
|
|
||||||
|
return unless compare_and_set old_value, new_value
|
||||||
|
|
||||||
|
new_value
|
||||||
|
end
|
||||||
|
|
||||||
|
def try_update!
|
||||||
|
old_value = get
|
||||||
|
new_value = yield old_value
|
||||||
|
unless compare_and_set(old_value, new_value)
|
||||||
|
if $VERBOSE
|
||||||
|
raise ConcurrentUpdateError, "Update failed"
|
||||||
|
else
|
||||||
|
raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE
|
||||||
|
end
|
||||||
|
end
|
||||||
|
new_value
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,67 @@
|
|||||||
|
require 'concurrent/atomic_reference/atomic_direct_update'
|
||||||
|
require 'concurrent/atomic_reference/numeric_cas_wrapper'
|
||||||
|
require 'concurrent/synchronization/safe_initialization'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
  class MutexAtomicReference
    extend Concurrent::Synchronization::SafeInitialization
    include AtomicDirectUpdate
    include AtomicNumericCompareAndSetWrapper
    alias_method :compare_and_swap, :compare_and_set

    # @!macro atomic_reference_method_initialize
    def initialize(value = nil)
      super()
      # Capitalized ivar: assigned once here and never reassigned; publication
      # is handled by SafeInitialization. ::Mutex avoids namespace collisions.
      @Lock = ::Mutex.new
      @value = value
    end

    # @!macro atomic_reference_method_get
    def get
      synchronize { @value }
    end
    alias_method :value, :get

    # @!macro atomic_reference_method_set
    def set(new_value)
      synchronize { @value = new_value }
    end
    alias_method :value=, :set

    # @!macro atomic_reference_method_get_and_set
    # Atomically stores new_value and returns the previous value.
    def get_and_set(new_value)
      synchronize do
        old_value = @value
        @value = new_value
        old_value
      end
    end
    alias_method :swap, :get_and_set

    # @!macro atomic_reference_method_compare_and_set
    # Identity comparison (equal?); numeric ==-based comparison is layered on
    # top by AtomicNumericCompareAndSetWrapper#compare_and_set.
    def _compare_and_set(old_value, new_value)
      synchronize do
        if @value.equal? old_value
          @value = new_value
          true
        else
          false
        end
      end
    end

    protected

    # @!visibility private
    # Reentrant locking: yields directly when the current thread already owns
    # @Lock, otherwise takes the mutex around the block.
    def synchronize
      if @Lock.owned?
        yield
      else
        @Lock.synchronize { yield }
      end
    end
  end
|
||||||
|
end
|
||||||
@ -0,0 +1,28 @@
|
|||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Special "compare and set" handling of numeric values.
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
module AtomicNumericCompareAndSetWrapper
|
||||||
|
|
||||||
|
# @!macro atomic_reference_method_compare_and_set
|
||||||
|
def compare_and_set(old_value, new_value)
|
||||||
|
if old_value.kind_of? Numeric
|
||||||
|
while true
|
||||||
|
old = get
|
||||||
|
|
||||||
|
return false unless old.kind_of? Numeric
|
||||||
|
|
||||||
|
return false unless old == old_value
|
||||||
|
|
||||||
|
result = _compare_and_set(old, new_value)
|
||||||
|
return result if result
|
||||||
|
end
|
||||||
|
else
|
||||||
|
_compare_and_set(old_value, new_value)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,10 @@
|
|||||||
|
require 'concurrent/atomic/atomic_reference'
|
||||||
|
require 'concurrent/atomic/atomic_boolean'
|
||||||
|
require 'concurrent/atomic/atomic_fixnum'
|
||||||
|
require 'concurrent/atomic/cyclic_barrier'
|
||||||
|
require 'concurrent/atomic/count_down_latch'
|
||||||
|
require 'concurrent/atomic/event'
|
||||||
|
require 'concurrent/atomic/read_write_lock'
|
||||||
|
require 'concurrent/atomic/reentrant_read_write_lock'
|
||||||
|
require 'concurrent/atomic/semaphore'
|
||||||
|
require 'concurrent/atomic/thread_local_var'
|
||||||
@ -0,0 +1,107 @@
|
|||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# A thread safe observer set implemented using copy-on-read approach:
|
||||||
|
# observers are added and removed from a thread safe collection; every time
|
||||||
|
# a notification is required the internal data structure is copied to
|
||||||
|
# prevent concurrency issues
|
||||||
|
#
|
||||||
|
# @api private
|
||||||
|
class CopyOnNotifyObserverSet < Synchronization::LockableObject
|
||||||
|
|
||||||
|
def initialize
|
||||||
|
super()
|
||||||
|
synchronize { ns_initialize }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro observable_add_observer
|
||||||
|
def add_observer(observer = nil, func = :update, &block)
|
||||||
|
if observer.nil? && block.nil?
|
||||||
|
raise ArgumentError, 'should pass observer as a first argument or block'
|
||||||
|
elsif observer && block
|
||||||
|
raise ArgumentError.new('cannot provide both an observer and a block')
|
||||||
|
end
|
||||||
|
|
||||||
|
if block
|
||||||
|
observer = block
|
||||||
|
func = :call
|
||||||
|
end
|
||||||
|
|
||||||
|
synchronize do
|
||||||
|
@observers[observer] = func
|
||||||
|
observer
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro observable_delete_observer
|
||||||
|
def delete_observer(observer)
|
||||||
|
synchronize do
|
||||||
|
@observers.delete(observer)
|
||||||
|
observer
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro observable_delete_observers
|
||||||
|
def delete_observers
|
||||||
|
synchronize do
|
||||||
|
@observers.clear
|
||||||
|
self
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro observable_count_observers
|
||||||
|
def count_observers
|
||||||
|
synchronize { @observers.count }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Notifies all registered observers with optional args
|
||||||
|
# @param [Object] args arguments to be passed to each observer
|
||||||
|
# @return [CopyOnWriteObserverSet] self
|
||||||
|
def notify_observers(*args, &block)
|
||||||
|
observers = duplicate_observers
|
||||||
|
notify_to(observers, *args, &block)
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# Notifies all registered observers with optional args and deletes them.
|
||||||
|
#
|
||||||
|
# @param [Object] args arguments to be passed to each observer
|
||||||
|
# @return [CopyOnWriteObserverSet] self
|
||||||
|
def notify_and_delete_observers(*args, &block)
|
||||||
|
observers = duplicate_and_clear_observers
|
||||||
|
notify_to(observers, *args, &block)
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
def ns_initialize
|
||||||
|
@observers = {}
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def duplicate_and_clear_observers
|
||||||
|
synchronize do
|
||||||
|
observers = @observers.dup
|
||||||
|
@observers.clear
|
||||||
|
observers
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def duplicate_observers
|
||||||
|
synchronize { @observers.dup }
|
||||||
|
end
|
||||||
|
|
||||||
|
def notify_to(observers, *args)
|
||||||
|
raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty?
|
||||||
|
observers.each do |observer, function|
|
||||||
|
args = yield if block_given?
|
||||||
|
observer.send(function, *args)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,111 @@
|
|||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# A thread safe observer set implemented using copy-on-write approach:
|
||||||
|
# every time an observer is added or removed the whole internal data structure is
|
||||||
|
# duplicated and replaced with a new one.
|
||||||
|
#
|
||||||
|
# @api private
|
||||||
|
    class CopyOnWriteObserverSet < Synchronization::LockableObject

      def initialize
        super()
        synchronize { ns_initialize }
      end

      # @!macro observable_add_observer
      # Registers an observer (object + method name) or a block (invoked via
      # :call). The map is duplicated, mutated, then swapped in (copy-on-write),
      # so readers never see a partially-updated map.
      def add_observer(observer = nil, func = :update, &block)
        if observer.nil? && block.nil?
          raise ArgumentError, 'should pass observer as a first argument or block'
        elsif observer && block
          raise ArgumentError.new('cannot provide both an observer and a block')
        end

        if block
          observer = block
          func = :call
        end

        synchronize do
          new_observers = @observers.dup
          new_observers[observer] = func
          @observers = new_observers
          observer
        end
      end

      # @!macro observable_delete_observer
      # Copy-on-write removal: mutates a duplicate and swaps it in.
      def delete_observer(observer)
        synchronize do
          new_observers = @observers.dup
          new_observers.delete(observer)
          @observers = new_observers
          observer
        end
      end

      # @!macro observable_delete_observers
      def delete_observers
        self.observers = {}
        self
      end

      # @!macro observable_count_observers
      def count_observers
        observers.count
      end

      # Notifies all registered observers with optional args
      # @param [Object] args arguments to be passed to each observer
      # @return [CopyOnWriteObserverSet] self
      def notify_observers(*args, &block)
        notify_to(observers, *args, &block)
        self
      end

      # Notifies all registered observers with optional args and deletes them.
      #
      # @param [Object] args arguments to be passed to each observer
      # @return [CopyOnWriteObserverSet] self
      def notify_and_delete_observers(*args, &block)
        old = clear_observers_and_return_old
        notify_to(old, *args, &block)
        self
      end

      protected

      # Sets up the backing observer map; called under the instance lock.
      def ns_initialize
        @observers = {}
      end

      private

      # Dispatches to every observer in the given map, outside the lock.
      # A block, when given, recomputes the arguments for each observer.
      def notify_to(observers, *args)
        raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty?
        observers.each do |observer, function|
          args = yield if block_given?
          observer.send(function, *args)
        end
      end

      # Reads the current (immutable-by-convention) map under the lock.
      def observers
        synchronize { @observers }
      end

      # Replaces the whole map under the lock.
      def observers=(new_set)
        synchronize { @observers = new_set }
      end

      # Atomically takes the current map and installs a fresh empty one.
      def clear_observers_and_return_old
        synchronize do
          old_observers = @observers
          @observers = {}
          old_observers
        end
      end
    end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,84 @@
|
|||||||
|
if Concurrent.on_jruby?
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
|
||||||
|
# @!macro priority_queue
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
      class JavaNonConcurrentPriorityQueue

        # @!macro priority_queue_method_initialize
        # :order => :min or :low yields a min-first queue (Java's natural
        # ordering); anything else (default :max) uses a reversed comparator.
        def initialize(opts = {})
          order = opts.fetch(:order, :max)
          if [:min, :low].include?(order)
            @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity
          else
            @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder())
          end
        end

        # @!macro priority_queue_method_clear
        def clear
          @queue.clear
          true
        end

        # @!macro priority_queue_method_delete
        # Removes every occurrence of item; true if at least one was removed.
        def delete(item)
          found = false
          while @queue.remove(item) do
            found = true
          end
          found
        end

        # @!macro priority_queue_method_empty
        def empty?
          @queue.size == 0
        end

        # @!macro priority_queue_method_include
        def include?(item)
          @queue.contains(item)
        end
        alias_method :has_priority?, :include?

        # @!macro priority_queue_method_length
        def length
          @queue.size
        end
        alias_method :size, :length

        # @!macro priority_queue_method_peek
        # Returns the head element without removing it (nil when empty).
        def peek
          @queue.peek
        end

        # @!macro priority_queue_method_pop
        # Removes and returns the head element (nil when empty, per
        # java.util.PriorityQueue#poll).
        def pop
          @queue.poll
        end
        alias_method :deq, :pop
        alias_method :shift, :pop

        # @!macro priority_queue_method_push
        def push(item)
          raise ArgumentError.new('cannot enqueue nil') if item.nil?
          @queue.add(item)
        end
        alias_method :<<, :push
        alias_method :enq, :push

        # @!macro priority_queue_method_from_list
        # Builds a queue from an enumerable, pushing elements one by one.
        def self.from_list(list, opts = {})
          queue = new(opts)
          list.each{|item| queue << item }
          queue
        end
      end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,160 @@
|
|||||||
|
require 'concurrent/synchronization/object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro warn.edge
|
||||||
|
class LockFreeStack < Synchronization::Object
|
||||||
|
|
||||||
|
safe_initialization!
|
||||||
|
|
||||||
|
    class Node
      # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class?

      # @return [Node] the next node down the stack (EMPTY terminates the chain)
      attr_reader :next_node

      # @return [Object] the payload stored in this node
      attr_reader :value

      # @!visibility private
      # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised
      attr_writer :value

      def initialize(value, next_node)
        @value = value
        @next_node = next_node
      end

      # Node[value, next_node] is shorthand for Node.new(value, next_node)
      singleton_class.send :alias_method, :[], :new
    end

    # The singleton for empty node
    EMPTY = Node[nil, nil]
    # EMPTY is its own tail, so following next_node never yields nil
    def EMPTY.next_node
      self
    end

    # Atomic head reference; every mutation below goes through CAS on it.
    attr_atomic(:head)
    private :head, :head=, :swap_head, :compare_and_set_head, :update_head
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
    # Builds a stack containing a single value.
    def self.of1(value)
      new Node[value, EMPTY]
    end

    # @!visibility private
    # Builds a stack containing two values (value1 on top).
    def self.of2(value1, value2)
      new Node[value1, Node[value2, EMPTY]]
    end

    # @param [Node] head initial top of the stack (defaults to the empty stack)
    def initialize(head = EMPTY)
      super()
      self.head = head
    end

    # @param [Node] head a snapshot to test; the default `head = head()`
    #   re-reads the current atomic head when no snapshot is supplied
    # @return [true, false]
    def empty?(head = head())
      head.equal? EMPTY
    end

    # Single CAS attempt: pushes value only if `head` is still the current top.
    #
    # @param [Node] head
    # @param [Object] value
    # @return [true, false]
    def compare_and_push(head, value)
      compare_and_set_head head, Node[value, head]
    end

    # Pushes value, retrying the CAS until it succeeds.
    #
    # @param [Object] value
    # @return [self]
    def push(value)
      while true
        current_head = head
        return self if compare_and_set_head current_head, Node[value, current_head]
      end
    end
|
||||||
|
|
||||||
|
# @return [Node]
|
||||||
|
def peek
|
||||||
|
head
|
||||||
|
end
|
||||||
|
|
||||||
|
# @param [Node] head
|
||||||
|
# @return [true, false]
|
||||||
|
def compare_and_pop(head)
|
||||||
|
compare_and_set_head head, head.next_node
|
||||||
|
end
|
||||||
|
|
||||||
|
# @return [Object]
|
||||||
|
def pop
|
||||||
|
while true
|
||||||
|
current_head = head
|
||||||
|
return current_head.value if compare_and_set_head current_head, current_head.next_node
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @param [Node] head
|
||||||
|
# @return [true, false]
|
||||||
|
def compare_and_clear(head)
|
||||||
|
compare_and_set_head head, EMPTY
|
||||||
|
end
|
||||||
|
|
||||||
|
include Enumerable
|
||||||
|
|
||||||
|
# @param [Node] head
|
||||||
|
# @return [self]
|
||||||
|
def each(head = nil)
|
||||||
|
return to_enum(:each, head) unless block_given?
|
||||||
|
it = head || peek
|
||||||
|
until it.equal?(EMPTY)
|
||||||
|
yield it.value
|
||||||
|
it = it.next_node
|
||||||
|
end
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# @return [true, false]
|
||||||
|
def clear
|
||||||
|
while true
|
||||||
|
current_head = head
|
||||||
|
return false if current_head == EMPTY
|
||||||
|
return true if compare_and_set_head current_head, EMPTY
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @param [Node] head
|
||||||
|
# @return [true, false]
|
||||||
|
def clear_if(head)
|
||||||
|
compare_and_set_head head, EMPTY
|
||||||
|
end
|
||||||
|
|
||||||
|
# @param [Node] head
|
||||||
|
# @param [Node] new_head
|
||||||
|
# @return [true, false]
|
||||||
|
def replace_if(head, new_head)
|
||||||
|
compare_and_set_head head, new_head
|
||||||
|
end
|
||||||
|
|
||||||
|
# @return [self]
|
||||||
|
# @yield over the cleared stack
|
||||||
|
# @yieldparam [Object] value
|
||||||
|
def clear_each(&block)
|
||||||
|
while true
|
||||||
|
current_head = head
|
||||||
|
return self if current_head == EMPTY
|
||||||
|
if compare_and_set_head current_head, EMPTY
|
||||||
|
each current_head, &block
|
||||||
|
return self
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @return [String] Short string representation.
|
||||||
|
def to_s
|
||||||
|
format '%s %s>', super[0..-2], to_a.to_s
|
||||||
|
end
|
||||||
|
|
||||||
|
alias_method :inspect, :to_s
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,66 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/collection/map/non_concurrent_map_backend'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
class MriMapBackend < NonConcurrentMapBackend
|
||||||
|
|
||||||
|
def initialize(options = nil, &default_proc)
|
||||||
|
super(options, &default_proc)
|
||||||
|
@write_lock = Mutex.new
|
||||||
|
end
|
||||||
|
|
||||||
|
def []=(key, value)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute_if_absent(key)
|
||||||
|
if NULL != (stored_value = @backend.fetch(key, NULL)) # fast non-blocking path for the most likely case
|
||||||
|
stored_value
|
||||||
|
else
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute_if_present(key)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute(key)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def merge_pair(key, value)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def replace_pair(key, old_value, new_value)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def replace_if_exists(key, new_value)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_and_set(key, value)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete(key)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete_pair(key, value)
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def clear
|
||||||
|
@write_lock.synchronize { super }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,148 @@
|
|||||||
|
require 'concurrent/constants'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
class NonConcurrentMapBackend
|
||||||
|
|
||||||
|
# WARNING: all public methods of the class must operate on the @backend
|
||||||
|
# directly without calling each other. This is important because of the
|
||||||
|
# SynchronizedMapBackend which uses a non-reentrant mutex for performance
|
||||||
|
# reasons.
|
||||||
|
def initialize(options = nil, &default_proc)
|
||||||
|
validate_options_hash!(options) if options.kind_of?(::Hash)
|
||||||
|
set_backend(default_proc)
|
||||||
|
@default_proc = default_proc
|
||||||
|
end
|
||||||
|
|
||||||
|
def [](key)
|
||||||
|
@backend[key]
|
||||||
|
end
|
||||||
|
|
||||||
|
def []=(key, value)
|
||||||
|
@backend[key] = value
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute_if_absent(key)
|
||||||
|
if NULL != (stored_value = @backend.fetch(key, NULL))
|
||||||
|
stored_value
|
||||||
|
else
|
||||||
|
@backend[key] = yield
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def replace_pair(key, old_value, new_value)
|
||||||
|
if pair?(key, old_value)
|
||||||
|
@backend[key] = new_value
|
||||||
|
true
|
||||||
|
else
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def replace_if_exists(key, new_value)
|
||||||
|
if NULL != (stored_value = @backend.fetch(key, NULL))
|
||||||
|
@backend[key] = new_value
|
||||||
|
stored_value
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute_if_present(key)
|
||||||
|
if NULL != (stored_value = @backend.fetch(key, NULL))
|
||||||
|
store_computed_value(key, yield(stored_value))
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute(key)
|
||||||
|
store_computed_value(key, yield(get_or_default(key, nil)))
|
||||||
|
end
|
||||||
|
|
||||||
|
def merge_pair(key, value)
|
||||||
|
if NULL == (stored_value = @backend.fetch(key, NULL))
|
||||||
|
@backend[key] = value
|
||||||
|
else
|
||||||
|
store_computed_value(key, yield(stored_value))
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_and_set(key, value)
|
||||||
|
stored_value = get_or_default(key, nil)
|
||||||
|
@backend[key] = value
|
||||||
|
stored_value
|
||||||
|
end
|
||||||
|
|
||||||
|
def key?(key)
|
||||||
|
@backend.key?(key)
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete(key)
|
||||||
|
@backend.delete(key)
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete_pair(key, value)
|
||||||
|
if pair?(key, value)
|
||||||
|
@backend.delete(key)
|
||||||
|
true
|
||||||
|
else
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def clear
|
||||||
|
@backend.clear
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
def each_pair
|
||||||
|
dupped_backend.each_pair do |k, v|
|
||||||
|
yield k, v
|
||||||
|
end
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
def size
|
||||||
|
@backend.size
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_or_default(key, default_value)
|
||||||
|
@backend.fetch(key, default_value)
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def set_backend(default_proc)
|
||||||
|
if default_proc
|
||||||
|
@backend = ::Hash.new { |_h, key| default_proc.call(self, key) }
|
||||||
|
else
|
||||||
|
@backend = {}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def initialize_copy(other)
|
||||||
|
super
|
||||||
|
set_backend(@default_proc)
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
def dupped_backend
|
||||||
|
@backend.dup
|
||||||
|
end
|
||||||
|
|
||||||
|
def pair?(key, expected_value)
|
||||||
|
NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value)
|
||||||
|
end
|
||||||
|
|
||||||
|
def store_computed_value(key, new_value)
|
||||||
|
if new_value.nil?
|
||||||
|
@backend.delete(key)
|
||||||
|
nil
|
||||||
|
else
|
||||||
|
@backend[key] = new_value
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,85 @@
|
|||||||
|
require 'concurrent/collection/map/non_concurrent_map_backend'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
class SynchronizedMapBackend < NonConcurrentMapBackend
|
||||||
|
|
||||||
|
def initialize(*args, &block)
|
||||||
|
super
|
||||||
|
|
||||||
|
# WARNING: Mutex is a non-reentrant lock, so the synchronized methods are
|
||||||
|
# not allowed to call each other.
|
||||||
|
@mutex = Mutex.new
|
||||||
|
end
|
||||||
|
|
||||||
|
def [](key)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def []=(key, value)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute_if_absent(key)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute_if_present(key)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def compute(key)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def merge_pair(key, value)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def replace_pair(key, old_value, new_value)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def replace_if_exists(key, new_value)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_and_set(key, value)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def key?(key)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete(key)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete_pair(key, value)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def clear
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def size
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
def get_or_default(key, default_value)
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
def dupped_backend
|
||||||
|
@mutex.synchronize { super }
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,14 @@
|
|||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
class TruffleRubyMapBackend < TruffleRuby::ConcurrentMap
|
||||||
|
def initialize(options = nil)
|
||||||
|
options ||= {}
|
||||||
|
super(initial_capacity: options[:initial_capacity], load_factor: options[:load_factor])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,143 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/collection/java_non_concurrent_priority_queue'
|
||||||
|
require 'concurrent/collection/ruby_non_concurrent_priority_queue'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
NonConcurrentPriorityQueueImplementation = case
|
||||||
|
when Concurrent.on_jruby?
|
||||||
|
JavaNonConcurrentPriorityQueue
|
||||||
|
else
|
||||||
|
RubyNonConcurrentPriorityQueue
|
||||||
|
end
|
||||||
|
private_constant :NonConcurrentPriorityQueueImplementation
|
||||||
|
|
||||||
|
# @!macro priority_queue
|
||||||
|
#
|
||||||
|
# A queue collection in which the elements are sorted based on their
|
||||||
|
# comparison (spaceship) operator `<=>`. Items are added to the queue
|
||||||
|
# at a position relative to their priority. On removal the element
|
||||||
|
# with the "highest" priority is removed. By default the sort order is
|
||||||
|
# from highest to lowest, but a lowest-to-highest sort order can be
|
||||||
|
# set on construction.
|
||||||
|
#
|
||||||
|
# The API is based on the `Queue` class from the Ruby standard library.
|
||||||
|
#
|
||||||
|
# The pure Ruby implementation, `RubyNonConcurrentPriorityQueue` uses a heap algorithm
|
||||||
|
# stored in an array. The algorithm is based on the work of Robert Sedgewick
|
||||||
|
# and Kevin Wayne.
|
||||||
|
#
|
||||||
|
# The JRuby native implementation is a thin wrapper around the standard
|
||||||
|
# library `java.util.NonConcurrentPriorityQueue`.
|
||||||
|
#
|
||||||
|
# When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`.
|
||||||
|
# When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`.
|
||||||
|
#
|
||||||
|
# @note This implementation is *not* thread safe.
|
||||||
|
#
|
||||||
|
# @see http://en.wikipedia.org/wiki/Priority_queue
|
||||||
|
# @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html
|
||||||
|
#
|
||||||
|
# @see http://algs4.cs.princeton.edu/24pq/index.php#2.6
|
||||||
|
# @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation
|
||||||
|
|
||||||
|
alias_method :has_priority?, :include?
|
||||||
|
|
||||||
|
alias_method :size, :length
|
||||||
|
|
||||||
|
alias_method :deq, :pop
|
||||||
|
alias_method :shift, :pop
|
||||||
|
|
||||||
|
alias_method :<<, :push
|
||||||
|
alias_method :enq, :push
|
||||||
|
|
||||||
|
# @!method initialize(opts = {})
|
||||||
|
# @!macro priority_queue_method_initialize
|
||||||
|
#
|
||||||
|
# Create a new priority queue with no items.
|
||||||
|
#
|
||||||
|
# @param [Hash] opts the options for creating the queue
|
||||||
|
# @option opts [Symbol] :order (:max) dictates the order in which items are
|
||||||
|
# stored: from highest to lowest when `:max` or `:high`; from lowest to
|
||||||
|
# highest when `:min` or `:low`
|
||||||
|
|
||||||
|
# @!method clear
|
||||||
|
# @!macro priority_queue_method_clear
|
||||||
|
#
|
||||||
|
# Removes all of the elements from this priority queue.
|
||||||
|
|
||||||
|
# @!method delete(item)
|
||||||
|
# @!macro priority_queue_method_delete
|
||||||
|
#
|
||||||
|
# Deletes all items from `self` that are equal to `item`.
|
||||||
|
#
|
||||||
|
# @param [Object] item the item to be removed from the queue
|
||||||
|
# @return [Object] true if the item is found else false
|
||||||
|
|
||||||
|
# @!method empty?
|
||||||
|
# @!macro priority_queue_method_empty
|
||||||
|
#
|
||||||
|
# Returns `true` if `self` contains no elements.
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if there are no items in the queue else false
|
||||||
|
|
||||||
|
# @!method include?(item)
|
||||||
|
# @!macro priority_queue_method_include
|
||||||
|
#
|
||||||
|
# Returns `true` if the given item is present in `self` (that is, if any
|
||||||
|
# element == `item`), otherwise returns false.
|
||||||
|
#
|
||||||
|
# @param [Object] item the item to search for
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the item is found else false
|
||||||
|
|
||||||
|
# @!method length
|
||||||
|
# @!macro priority_queue_method_length
|
||||||
|
#
|
||||||
|
# The current length of the queue.
|
||||||
|
#
|
||||||
|
# @return [Fixnum] the number of items in the queue
|
||||||
|
|
||||||
|
# @!method peek
|
||||||
|
# @!macro priority_queue_method_peek
|
||||||
|
#
|
||||||
|
# Retrieves, but does not remove, the head of this queue, or returns `nil`
|
||||||
|
# if this queue is empty.
|
||||||
|
#
|
||||||
|
# @return [Object] the head of the queue or `nil` when empty
|
||||||
|
|
||||||
|
# @!method pop
|
||||||
|
# @!macro priority_queue_method_pop
|
||||||
|
#
|
||||||
|
# Retrieves and removes the head of this queue, or returns `nil` if this
|
||||||
|
# queue is empty.
|
||||||
|
#
|
||||||
|
# @return [Object] the head of the queue or `nil` when empty
|
||||||
|
|
||||||
|
# @!method push(item)
|
||||||
|
# @!macro priority_queue_method_push
|
||||||
|
#
|
||||||
|
# Inserts the specified element into this priority queue.
|
||||||
|
#
|
||||||
|
# @param [Object] item the item to insert onto the queue
|
||||||
|
|
||||||
|
# @!method self.from_list(list, opts = {})
|
||||||
|
# @!macro priority_queue_method_from_list
|
||||||
|
#
|
||||||
|
# Create a new priority queue from the given list.
|
||||||
|
#
|
||||||
|
# @param [Enumerable] list the list to build the queue from
|
||||||
|
# @param [Hash] opts the options for creating the queue
|
||||||
|
#
|
||||||
|
# @return [NonConcurrentPriorityQueue] the newly created and populated queue
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,160 @@
|
|||||||
|
module Concurrent
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# @!macro priority_queue
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class RubyNonConcurrentPriorityQueue
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_initialize
|
||||||
|
def initialize(opts = {})
|
||||||
|
order = opts.fetch(:order, :max)
|
||||||
|
@comparator = [:min, :low].include?(order) ? -1 : 1
|
||||||
|
clear
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_clear
|
||||||
|
def clear
|
||||||
|
@queue = [nil]
|
||||||
|
@length = 0
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_delete
|
||||||
|
def delete(item)
|
||||||
|
return false if empty?
|
||||||
|
original_length = @length
|
||||||
|
k = 1
|
||||||
|
while k <= @length
|
||||||
|
if @queue[k] == item
|
||||||
|
swap(k, @length)
|
||||||
|
@length -= 1
|
||||||
|
sink(k) || swim(k)
|
||||||
|
@queue.pop
|
||||||
|
else
|
||||||
|
k += 1
|
||||||
|
end
|
||||||
|
end
|
||||||
|
@length != original_length
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_empty
|
||||||
|
def empty?
|
||||||
|
size == 0
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_include
|
||||||
|
def include?(item)
|
||||||
|
@queue.include?(item)
|
||||||
|
end
|
||||||
|
alias_method :has_priority?, :include?
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_length
|
||||||
|
def length
|
||||||
|
@length
|
||||||
|
end
|
||||||
|
alias_method :size, :length
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_peek
|
||||||
|
def peek
|
||||||
|
empty? ? nil : @queue[1]
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_pop
|
||||||
|
def pop
|
||||||
|
return nil if empty?
|
||||||
|
max = @queue[1]
|
||||||
|
swap(1, @length)
|
||||||
|
@length -= 1
|
||||||
|
sink(1)
|
||||||
|
@queue.pop
|
||||||
|
max
|
||||||
|
end
|
||||||
|
alias_method :deq, :pop
|
||||||
|
alias_method :shift, :pop
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_push
|
||||||
|
def push(item)
|
||||||
|
raise ArgumentError.new('cannot enqueue nil') if item.nil?
|
||||||
|
@length += 1
|
||||||
|
@queue << item
|
||||||
|
swim(@length)
|
||||||
|
true
|
||||||
|
end
|
||||||
|
alias_method :<<, :push
|
||||||
|
alias_method :enq, :push
|
||||||
|
|
||||||
|
# @!macro priority_queue_method_from_list
|
||||||
|
def self.from_list(list, opts = {})
|
||||||
|
queue = new(opts)
|
||||||
|
list.each{|item| queue << item }
|
||||||
|
queue
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# Exchange the values at the given indexes within the internal array.
|
||||||
|
#
|
||||||
|
# @param [Integer] x the first index to swap
|
||||||
|
# @param [Integer] y the second index to swap
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def swap(x, y)
|
||||||
|
temp = @queue[x]
|
||||||
|
@queue[x] = @queue[y]
|
||||||
|
@queue[y] = temp
|
||||||
|
end
|
||||||
|
|
||||||
|
# Are the items at the given indexes ordered based on the priority
|
||||||
|
# order specified at construction?
|
||||||
|
#
|
||||||
|
# @param [Integer] x the first index from which to retrieve a comparable value
|
||||||
|
# @param [Integer] y the second index from which to retrieve a comparable value
|
||||||
|
#
|
||||||
|
# @return [Boolean] true if the two elements are in the correct priority order
|
||||||
|
# else false
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def ordered?(x, y)
|
||||||
|
(@queue[x] <=> @queue[y]) == @comparator
|
||||||
|
end
|
||||||
|
|
||||||
|
# Percolate down to maintain heap invariant.
|
||||||
|
#
|
||||||
|
# @param [Integer] k the index at which to start the percolation
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def sink(k)
|
||||||
|
success = false
|
||||||
|
|
||||||
|
while (j = (2 * k)) <= @length do
|
||||||
|
j += 1 if j < @length && ! ordered?(j, j+1)
|
||||||
|
break if ordered?(k, j)
|
||||||
|
swap(k, j)
|
||||||
|
success = true
|
||||||
|
k = j
|
||||||
|
end
|
||||||
|
|
||||||
|
success
|
||||||
|
end
|
||||||
|
|
||||||
|
# Percolate up to maintain heap invariant.
|
||||||
|
#
|
||||||
|
# @param [Integer] k the index at which to start the percolation
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def swim(k)
|
||||||
|
success = false
|
||||||
|
|
||||||
|
while k > 1 && ! ordered?(k/2, k) do
|
||||||
|
swap(k, k/2)
|
||||||
|
k = k/2
|
||||||
|
success = true
|
||||||
|
end
|
||||||
|
|
||||||
|
success
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,34 @@
|
|||||||
|
require 'concurrent/concern/logging'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Concern
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
module Deprecation
|
||||||
|
# TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed.
|
||||||
|
include Concern::Logging
|
||||||
|
|
||||||
|
def deprecated(message, strip = 2)
|
||||||
|
caller_line = caller(strip).first if strip > 0
|
||||||
|
klass = if Module === self
|
||||||
|
self
|
||||||
|
else
|
||||||
|
self.class
|
||||||
|
end
|
||||||
|
message = if strip > 0
|
||||||
|
format("[DEPRECATED] %s\ncalled on: %s", message, caller_line)
|
||||||
|
else
|
||||||
|
format('[DEPRECATED] %s', message)
|
||||||
|
end
|
||||||
|
log WARN, klass.to_s, message
|
||||||
|
end
|
||||||
|
|
||||||
|
def deprecated_method(old_name, new_name)
|
||||||
|
deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3
|
||||||
|
end
|
||||||
|
|
||||||
|
extend self
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,73 @@
|
|||||||
|
module Concurrent
|
||||||
|
module Concern
|
||||||
|
|
||||||
|
# Object references in Ruby are mutable. This can lead to serious problems when
|
||||||
|
# the `#value` of a concurrent object is a mutable reference. Which is always the
|
||||||
|
# case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type.
|
||||||
|
# Most classes in this library that expose a `#value` getter method do so using the
|
||||||
|
# `Dereferenceable` mixin module.
|
||||||
|
#
|
||||||
|
# @!macro copy_options
|
||||||
|
module Dereferenceable
|
||||||
|
# NOTE: This module is going away in 2.0. In the mean time we need it to
|
||||||
|
# play nicely with the synchronization layer. This means that the
|
||||||
|
# including class SHOULD be synchronized and it MUST implement a
|
||||||
|
# `#synchronize` method. Not doing so will lead to runtime errors.
|
||||||
|
|
||||||
|
# Return the value this object represents after applying the options specified
|
||||||
|
# by the `#set_deref_options` method.
|
||||||
|
#
|
||||||
|
# @return [Object] the current value of the object
|
||||||
|
def value
|
||||||
|
synchronize { apply_deref_options(@value) }
|
||||||
|
end
|
||||||
|
alias_method :deref, :value
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
# Set the internal value of this object
|
||||||
|
#
|
||||||
|
# @param [Object] value the new value
|
||||||
|
def value=(value)
|
||||||
|
synchronize{ @value = value }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro dereferenceable_set_deref_options
|
||||||
|
# Set the options which define the operations #value performs before
|
||||||
|
# returning data to the caller (dereferencing).
|
||||||
|
#
|
||||||
|
# @note Most classes that include this module will call `#set_deref_options`
|
||||||
|
# from within the constructor, thus allowing these options to be set at
|
||||||
|
# object creation.
|
||||||
|
#
|
||||||
|
# @param [Hash] opts the options defining dereference behavior.
|
||||||
|
# @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
|
||||||
|
# @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
|
||||||
|
# @option opts [String] :copy_on_deref (nil) call the given `Proc` passing
|
||||||
|
# the internal value and returning the value returned from the proc
|
||||||
|
def set_deref_options(opts = {})
|
||||||
|
synchronize{ ns_set_deref_options(opts) }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro dereferenceable_set_deref_options
|
||||||
|
# @!visibility private
|
||||||
|
def ns_set_deref_options(opts)
|
||||||
|
@dup_on_deref = opts[:dup_on_deref] || opts[:dup]
|
||||||
|
@freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze]
|
||||||
|
@copy_on_deref = opts[:copy_on_deref] || opts[:copy]
|
||||||
|
@do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref)
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def apply_deref_options(value)
|
||||||
|
return nil if value.nil?
|
||||||
|
return value if @do_nothing_on_deref
|
||||||
|
value = @copy_on_deref.call(value) if @copy_on_deref
|
||||||
|
value = value.dup if @dup_on_deref
|
||||||
|
value = value.freeze if @freeze_on_deref
|
||||||
|
value
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,116 @@
|
|||||||
|
require 'logger'
|
||||||
|
require 'concurrent/atomic/atomic_reference'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Concern
|
||||||
|
|
||||||
|
# Include where logging is needed
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
module Logging
|
||||||
|
include Logger::Severity
|
||||||
|
|
||||||
|
# Logs through {Concurrent.global_logger}, it can be overridden by setting @logger
|
||||||
|
# @param [Integer] level one of Logger::Severity constants
|
||||||
|
# @param [String] progname e.g. a path of an Actor
|
||||||
|
# @param [String, nil] message when nil block is used to generate the message
|
||||||
|
# @yieldreturn [String] a message
|
||||||
|
def log(level, progname, message = nil, &block)
|
||||||
|
logger = if defined?(@logger) && @logger
|
||||||
|
@logger
|
||||||
|
else
|
||||||
|
Concurrent.global_logger
|
||||||
|
end
|
||||||
|
logger.call level, progname, message, &block
|
||||||
|
rescue => error
|
||||||
|
$stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" +
|
||||||
|
"#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
extend Concern::Logging
|
||||||
|
|
||||||
|
# @return [Logger] Logger with provided level and output.
|
||||||
|
def self.create_simple_logger(level = Logger::FATAL, output = $stderr)
|
||||||
|
# TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking
|
||||||
|
lambda do |severity, progname, message = nil, &block|
|
||||||
|
return false if severity < level
|
||||||
|
|
||||||
|
message = block ? block.call : message
|
||||||
|
formatted_message = case message
|
||||||
|
when String
|
||||||
|
message
|
||||||
|
when Exception
|
||||||
|
format "%s (%s)\n%s",
|
||||||
|
message.message, message.class, (message.backtrace || []).join("\n")
|
||||||
|
else
|
||||||
|
message.inspect
|
||||||
|
end
|
||||||
|
|
||||||
|
output.print format "[%s] %5s -- %s: %s\n",
|
||||||
|
Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'),
|
||||||
|
Logger::SEV_LABEL[severity],
|
||||||
|
progname,
|
||||||
|
formatted_message
|
||||||
|
true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Use logger created by #create_simple_logger to log concurrent-ruby messages.
|
||||||
|
def self.use_simple_logger(level = Logger::FATAL, output = $stderr)
|
||||||
|
Concurrent.global_logger = create_simple_logger level, output
|
||||||
|
end
|
||||||
|
|
||||||
|
# @return [Logger] Logger with provided level and output.
|
||||||
|
# @deprecated
|
||||||
|
def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr)
|
||||||
|
logger = Logger.new(output)
|
||||||
|
logger.level = level
|
||||||
|
logger.formatter = lambda do |severity, datetime, progname, msg|
|
||||||
|
formatted_message = case msg
|
||||||
|
when String
|
||||||
|
msg
|
||||||
|
when Exception
|
||||||
|
format "%s (%s)\n%s",
|
||||||
|
msg.message, msg.class, (msg.backtrace || []).join("\n")
|
||||||
|
else
|
||||||
|
msg.inspect
|
||||||
|
end
|
||||||
|
format "[%s] %5s -- %s: %s\n",
|
||||||
|
datetime.strftime('%Y-%m-%d %H:%M:%S.%L'),
|
||||||
|
severity,
|
||||||
|
progname,
|
||||||
|
formatted_message
|
||||||
|
end
|
||||||
|
|
||||||
|
lambda do |loglevel, progname, message = nil, &block|
|
||||||
|
logger.add loglevel, message, progname, &block
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Use logger created by #create_stdlib_logger to log concurrent-ruby messages.
|
||||||
|
# @deprecated
|
||||||
|
def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr)
|
||||||
|
Concurrent.global_logger = create_stdlib_logger level, output
|
||||||
|
end
|
||||||
|
|
||||||
|
# TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods
|
||||||
|
|
||||||
|
# Suppresses all output when used for logging.
|
||||||
|
NULL_LOGGER = lambda { |level, progname, message = nil, &block| }
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN))
|
||||||
|
private_constant :GLOBAL_LOGGER
|
||||||
|
|
||||||
|
def self.global_logger
|
||||||
|
GLOBAL_LOGGER.value
|
||||||
|
end
|
||||||
|
|
||||||
|
def self.global_logger=(value)
|
||||||
|
GLOBAL_LOGGER.value = value
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,220 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'timeout'
|
||||||
|
|
||||||
|
require 'concurrent/atomic/event'
|
||||||
|
require 'concurrent/concern/dereferenceable'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Concern
|
||||||
|
|
||||||
|
module Obligation
|
||||||
|
include Concern::Dereferenceable
|
||||||
|
# NOTE: The Dereferenceable module is going away in 2.0. In the mean time
|
||||||
|
# we need it to place nicely with the synchronization layer. This means
|
||||||
|
# that the including class SHOULD be synchronized and it MUST implement a
|
||||||
|
# `#synchronize` method. Not doing so will lead to runtime errors.
|
||||||
|
|
||||||
|
# Has the obligation been fulfilled?
|
||||||
|
#
|
||||||
|
# @return [Boolean]
|
||||||
|
def fulfilled?
|
||||||
|
state == :fulfilled
|
||||||
|
end
|
||||||
|
alias_method :realized?, :fulfilled?
|
||||||
|
|
||||||
|
# Has the obligation been rejected?
|
||||||
|
#
|
||||||
|
# @return [Boolean]
|
||||||
|
def rejected?
|
||||||
|
state == :rejected
|
||||||
|
end
|
||||||
|
|
||||||
|
# Is obligation completion still pending?
|
||||||
|
#
|
||||||
|
# @return [Boolean]
|
||||||
|
def pending?
|
||||||
|
state == :pending
|
||||||
|
end
|
||||||
|
|
||||||
|
# Is the obligation still unscheduled?
|
||||||
|
#
|
||||||
|
# @return [Boolean]
|
||||||
|
def unscheduled?
|
||||||
|
state == :unscheduled
|
||||||
|
end
|
||||||
|
|
||||||
|
# Has the obligation completed processing?
|
||||||
|
#
|
||||||
|
# @return [Boolean]
|
||||||
|
def complete?
|
||||||
|
[:fulfilled, :rejected].include? state
|
||||||
|
end
|
||||||
|
|
||||||
|
# Is the obligation still awaiting completion of processing?
|
||||||
|
#
|
||||||
|
# @return [Boolean]
|
||||||
|
def incomplete?
|
||||||
|
! complete?
|
||||||
|
end
|
||||||
|
|
||||||
|
# The current value of the obligation. Will be `nil` while the state is
|
||||||
|
# pending or the operation has been rejected.
|
||||||
|
#
|
||||||
|
# @param [Numeric] timeout the maximum time in seconds to wait.
|
||||||
|
# @return [Object] see Dereferenceable#deref
|
||||||
|
def value(timeout = nil)
|
||||||
|
wait timeout
|
||||||
|
deref
|
||||||
|
end
|
||||||
|
|
||||||
|
# Wait until obligation is complete or the timeout has been reached.
|
||||||
|
#
|
||||||
|
# @param [Numeric] timeout the maximum time in seconds to wait.
|
||||||
|
# @return [Obligation] self
|
||||||
|
def wait(timeout = nil)
|
||||||
|
event.wait(timeout) if timeout != 0 && incomplete?
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# Wait until obligation is complete or the timeout is reached. Will re-raise
|
||||||
|
# any exceptions raised during processing (but will not raise an exception
|
||||||
|
# on timeout).
|
||||||
|
#
|
||||||
|
# @param [Numeric] timeout the maximum time in seconds to wait.
|
||||||
|
# @return [Obligation] self
|
||||||
|
# @raise [Exception] raises the reason when rejected
|
||||||
|
def wait!(timeout = nil)
|
||||||
|
wait(timeout).tap { raise self if rejected? }
|
||||||
|
end
|
||||||
|
alias_method :no_error!, :wait!
|
||||||
|
|
||||||
|
# The current value of the obligation. Will be `nil` while the state is
|
||||||
|
# pending or the operation has been rejected. Will re-raise any exceptions
|
||||||
|
# raised during processing (but will not raise an exception on timeout).
|
||||||
|
#
|
||||||
|
# @param [Numeric] timeout the maximum time in seconds to wait.
|
||||||
|
# @return [Object] see Dereferenceable#deref
|
||||||
|
# @raise [Exception] raises the reason when rejected
|
||||||
|
def value!(timeout = nil)
|
||||||
|
wait(timeout)
|
||||||
|
if rejected?
|
||||||
|
raise self
|
||||||
|
else
|
||||||
|
deref
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# The current state of the obligation.
|
||||||
|
#
|
||||||
|
# @return [Symbol] the current state
|
||||||
|
def state
|
||||||
|
synchronize { @state }
|
||||||
|
end
|
||||||
|
|
||||||
|
# If an exception was raised during processing this will return the
|
||||||
|
# exception object. Will return `nil` when the state is pending or if
|
||||||
|
# the obligation has been successfully fulfilled.
|
||||||
|
#
|
||||||
|
# @return [Exception] the exception raised during processing or `nil`
|
||||||
|
def reason
|
||||||
|
synchronize { @reason }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @example allows Obligation to be risen
|
||||||
|
# rejected_ivar = Ivar.new.fail
|
||||||
|
# raise rejected_ivar
|
||||||
|
def exception(*args)
|
||||||
|
raise 'obligation is not rejected' unless rejected?
|
||||||
|
reason.exception(*args)
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def get_arguments_from(opts = {})
|
||||||
|
[*opts.fetch(:args, [])]
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def init_obligation
|
||||||
|
@event = Event.new
|
||||||
|
@value = @reason = nil
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def event
|
||||||
|
@event
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def set_state(success, value, reason)
|
||||||
|
if success
|
||||||
|
@value = value
|
||||||
|
@state = :fulfilled
|
||||||
|
else
|
||||||
|
@reason = reason
|
||||||
|
@state = :rejected
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def state=(value)
|
||||||
|
synchronize { ns_set_state(value) }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Atomic compare and set operation
|
||||||
|
# State is set to `next_state` only if `current state == expected_current`.
|
||||||
|
#
|
||||||
|
# @param [Symbol] next_state
|
||||||
|
# @param [Symbol] expected_current
|
||||||
|
#
|
||||||
|
# @return [Boolean] true is state is changed, false otherwise
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def compare_and_set_state(next_state, *expected_current)
|
||||||
|
synchronize do
|
||||||
|
if expected_current.include? @state
|
||||||
|
@state = next_state
|
||||||
|
true
|
||||||
|
else
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Executes the block within mutex if current state is included in expected_states
|
||||||
|
#
|
||||||
|
# @return block value if executed, false otherwise
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def if_state(*expected_states)
|
||||||
|
synchronize do
|
||||||
|
raise ArgumentError.new('no block given') unless block_given?
|
||||||
|
|
||||||
|
if expected_states.include? @state
|
||||||
|
yield
|
||||||
|
else
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
# Am I in the current state?
|
||||||
|
#
|
||||||
|
# @param [Symbol] expected The state to check against
|
||||||
|
# @return [Boolean] true if in the expected state else false
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def ns_check_state?(expected)
|
||||||
|
@state == expected
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def ns_set_state(value)
|
||||||
|
@state = value
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,110 @@
|
|||||||
|
require 'concurrent/collection/copy_on_notify_observer_set'
|
||||||
|
require 'concurrent/collection/copy_on_write_observer_set'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
module Concern
|
||||||
|
|
||||||
|
# The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one
|
||||||
|
# of the most useful design patterns.
|
||||||
|
#
|
||||||
|
# The workflow is very simple:
|
||||||
|
# - an `observer` can register itself to a `subject` via a callback
|
||||||
|
# - many `observers` can be registered to the same `subject`
|
||||||
|
# - the `subject` notifies all registered observers when its status changes
|
||||||
|
# - an `observer` can deregister itself when is no more interested to receive
|
||||||
|
# event notifications
|
||||||
|
#
|
||||||
|
# In a single threaded environment the whole pattern is very easy: the
|
||||||
|
# `subject` can use a simple data structure to manage all its subscribed
|
||||||
|
# `observer`s and every `observer` can react directly to every event without
|
||||||
|
# caring about synchronization.
|
||||||
|
#
|
||||||
|
# In a multi threaded environment things are more complex. The `subject` must
|
||||||
|
# synchronize the access to its data structure and to do so currently we're
|
||||||
|
# using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet}
|
||||||
|
# and {Concurrent::Concern::CopyOnNotifyObserverSet}.
|
||||||
|
#
|
||||||
|
# When implementing and `observer` there's a very important rule to remember:
|
||||||
|
# **there are no guarantees about the thread that will execute the callback**
|
||||||
|
#
|
||||||
|
# Let's take this example
|
||||||
|
# ```
|
||||||
|
# class Observer
|
||||||
|
# def initialize
|
||||||
|
# @count = 0
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# def update
|
||||||
|
# @count += 1
|
||||||
|
# end
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# obs = Observer.new
|
||||||
|
# [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) }
|
||||||
|
# # execute [obj1, obj2, obj3, obj4]
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# `obs` is wrong because the variable `@count` can be accessed by different
|
||||||
|
# threads at the same time, so it should be synchronized (using either a Mutex
|
||||||
|
# or an AtomicFixum)
|
||||||
|
module Observable
|
||||||
|
|
||||||
|
# @!macro observable_add_observer
|
||||||
|
#
|
||||||
|
# Adds an observer to this set. If a block is passed, the observer will be
|
||||||
|
# created by this method and no other params should be passed.
|
||||||
|
#
|
||||||
|
# @param [Object] observer the observer to add
|
||||||
|
# @param [Symbol] func the function to call on the observer during notification.
|
||||||
|
# Default is :update
|
||||||
|
# @return [Object] the added observer
|
||||||
|
def add_observer(observer = nil, func = :update, &block)
|
||||||
|
observers.add_observer(observer, func, &block)
|
||||||
|
end
|
||||||
|
|
||||||
|
# As `#add_observer` but can be used for chaining.
|
||||||
|
#
|
||||||
|
# @param [Object] observer the observer to add
|
||||||
|
# @param [Symbol] func the function to call on the observer during notification.
|
||||||
|
# @return [Observable] self
|
||||||
|
def with_observer(observer = nil, func = :update, &block)
|
||||||
|
add_observer(observer, func, &block)
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro observable_delete_observer
|
||||||
|
#
|
||||||
|
# Remove `observer` as an observer on this object so that it will no
|
||||||
|
# longer receive notifications.
|
||||||
|
#
|
||||||
|
# @param [Object] observer the observer to remove
|
||||||
|
# @return [Object] the deleted observer
|
||||||
|
def delete_observer(observer)
|
||||||
|
observers.delete_observer(observer)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro observable_delete_observers
|
||||||
|
#
|
||||||
|
# Remove all observers associated with this object.
|
||||||
|
#
|
||||||
|
# @return [Observable] self
|
||||||
|
def delete_observers
|
||||||
|
observers.delete_observers
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro observable_count_observers
|
||||||
|
#
|
||||||
|
# Return the number of observers associated with this object.
|
||||||
|
#
|
||||||
|
# @return [Integer] the observers count
|
||||||
|
def count_observers
|
||||||
|
observers.count_observers
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
attr_accessor :observers
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
Binary file not shown.
@ -0,0 +1,105 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/delay'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/concern/deprecation'
|
||||||
|
require 'concurrent/executor/immediate_executor'
|
||||||
|
require 'concurrent/executor/fixed_thread_pool'
|
||||||
|
require 'concurrent/executor/cached_thread_pool'
|
||||||
|
require 'concurrent/utility/processor_counter'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
extend Concern::Deprecation
|
||||||
|
|
||||||
|
autoload :Options, 'concurrent/options'
|
||||||
|
autoload :TimerSet, 'concurrent/executor/timer_set'
|
||||||
|
autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor'
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor }
|
||||||
|
private_constant :GLOBAL_FAST_EXECUTOR
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor }
|
||||||
|
private_constant :GLOBAL_IO_EXECUTOR
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
GLOBAL_TIMER_SET = Delay.new { TimerSet.new }
|
||||||
|
private_constant :GLOBAL_TIMER_SET
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
GLOBAL_IMMEDIATE_EXECUTOR = ImmediateExecutor.new
|
||||||
|
private_constant :GLOBAL_IMMEDIATE_EXECUTOR
|
||||||
|
|
||||||
|
# Disables AtExit handlers including pool auto-termination handlers.
|
||||||
|
# When disabled it will be the application programmer's responsibility
|
||||||
|
# to ensure that the handlers are shutdown properly prior to application
|
||||||
|
# exit by calling `AtExit.run` method.
|
||||||
|
#
|
||||||
|
# @note this option should be needed only because of `at_exit` ordering
|
||||||
|
# issues which may arise when running some of the testing frameworks.
|
||||||
|
# E.g. Minitest's test-suite runs itself in `at_exit` callback which
|
||||||
|
# executes after the pools are already terminated. Then auto termination
|
||||||
|
# needs to be disabled and called manually after test-suite ends.
|
||||||
|
# @note This method should *never* be called
|
||||||
|
# from within a gem. It should *only* be used from within the main
|
||||||
|
# application and even then it should be used only when necessary.
|
||||||
|
# @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841.
|
||||||
|
#
|
||||||
|
def self.disable_at_exit_handlers!
|
||||||
|
deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841."
|
||||||
|
end
|
||||||
|
|
||||||
|
# Global thread pool optimized for short, fast *operations*.
|
||||||
|
#
|
||||||
|
# @return [ThreadPoolExecutor] the thread pool
|
||||||
|
def self.global_fast_executor
|
||||||
|
GLOBAL_FAST_EXECUTOR.value!
|
||||||
|
end
|
||||||
|
|
||||||
|
# Global thread pool optimized for long, blocking (IO) *tasks*.
|
||||||
|
#
|
||||||
|
# @return [ThreadPoolExecutor] the thread pool
|
||||||
|
def self.global_io_executor
|
||||||
|
GLOBAL_IO_EXECUTOR.value!
|
||||||
|
end
|
||||||
|
|
||||||
|
def self.global_immediate_executor
|
||||||
|
GLOBAL_IMMEDIATE_EXECUTOR
|
||||||
|
end
|
||||||
|
|
||||||
|
# Global thread pool user for global *timers*.
|
||||||
|
#
|
||||||
|
# @return [Concurrent::TimerSet] the thread pool
|
||||||
|
def self.global_timer_set
|
||||||
|
GLOBAL_TIMER_SET.value!
|
||||||
|
end
|
||||||
|
|
||||||
|
# General access point to global executors.
|
||||||
|
# @param [Symbol, Executor] executor_identifier symbols:
|
||||||
|
# - :fast - {Concurrent.global_fast_executor}
|
||||||
|
# - :io - {Concurrent.global_io_executor}
|
||||||
|
# - :immediate - {Concurrent.global_immediate_executor}
|
||||||
|
# @return [Executor]
|
||||||
|
def self.executor(executor_identifier)
|
||||||
|
Options.executor(executor_identifier)
|
||||||
|
end
|
||||||
|
|
||||||
|
def self.new_fast_executor(opts = {})
|
||||||
|
FixedThreadPool.new(
|
||||||
|
[2, Concurrent.processor_count].max,
|
||||||
|
auto_terminate: opts.fetch(:auto_terminate, true),
|
||||||
|
idletime: 60, # 1 minute
|
||||||
|
max_queue: 0, # unlimited
|
||||||
|
fallback_policy: :abort, # shouldn't matter -- 0 max queue
|
||||||
|
name: "fast"
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
def self.new_io_executor(opts = {})
|
||||||
|
CachedThreadPool.new(
|
||||||
|
auto_terminate: opts.fetch(:auto_terminate, true),
|
||||||
|
fallback_policy: :abort, # shouldn't matter -- 0 max queue
|
||||||
|
name: "io"
|
||||||
|
)
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,8 @@
|
|||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Various classes within allows for +nil+ values to be stored,
|
||||||
|
# so a special +NULL+ token is required to indicate the "nil-ness".
|
||||||
|
# @!visibility private
|
||||||
|
NULL = ::Object.new
|
||||||
|
|
||||||
|
end
|
||||||
@ -0,0 +1,81 @@
|
|||||||
|
require 'concurrent/future'
|
||||||
|
require 'concurrent/atomic/atomic_fixnum'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
class DependencyCounter # :nodoc:
|
||||||
|
|
||||||
|
def initialize(count, &block)
|
||||||
|
@counter = AtomicFixnum.new(count)
|
||||||
|
@block = block
|
||||||
|
end
|
||||||
|
|
||||||
|
def update(time, value, reason)
|
||||||
|
if @counter.decrement == 0
|
||||||
|
@block.call
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available.
|
||||||
|
# {include:file:docs-source/dataflow.md}
|
||||||
|
#
|
||||||
|
# @param [Future] inputs zero or more `Future` operations that this dataflow depends upon
|
||||||
|
#
|
||||||
|
# @yield The operation to perform once all the dependencies are met
|
||||||
|
# @yieldparam [Future] inputs each of the `Future` inputs to the dataflow
|
||||||
|
# @yieldreturn [Object] the result of the block operation
|
||||||
|
#
|
||||||
|
# @return [Object] the result of all the operations
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if no block is given
|
||||||
|
# @raise [ArgumentError] if any of the inputs are not `IVar`s
|
||||||
|
def dataflow(*inputs, &block)
|
||||||
|
dataflow_with(Concurrent.global_io_executor, *inputs, &block)
|
||||||
|
end
|
||||||
|
module_function :dataflow
|
||||||
|
|
||||||
|
def dataflow_with(executor, *inputs, &block)
|
||||||
|
call_dataflow(:value, executor, *inputs, &block)
|
||||||
|
end
|
||||||
|
module_function :dataflow_with
|
||||||
|
|
||||||
|
def dataflow!(*inputs, &block)
|
||||||
|
dataflow_with!(Concurrent.global_io_executor, *inputs, &block)
|
||||||
|
end
|
||||||
|
module_function :dataflow!
|
||||||
|
|
||||||
|
def dataflow_with!(executor, *inputs, &block)
|
||||||
|
call_dataflow(:value!, executor, *inputs, &block)
|
||||||
|
end
|
||||||
|
module_function :dataflow_with!
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def call_dataflow(method, executor, *inputs, &block)
|
||||||
|
raise ArgumentError.new('an executor must be provided') if executor.nil?
|
||||||
|
raise ArgumentError.new('no block given') unless block_given?
|
||||||
|
unless inputs.all? { |input| input.is_a? IVar }
|
||||||
|
raise ArgumentError.new("Not all dependencies are IVars.\nDependencies: #{ inputs.inspect }")
|
||||||
|
end
|
||||||
|
|
||||||
|
result = Future.new(executor: executor) do
|
||||||
|
values = inputs.map { |input| input.send(method) }
|
||||||
|
block.call(*values)
|
||||||
|
end
|
||||||
|
|
||||||
|
if inputs.empty?
|
||||||
|
result.execute
|
||||||
|
else
|
||||||
|
counter = DependencyCounter.new(inputs.size) { result.execute }
|
||||||
|
|
||||||
|
inputs.each do |input|
|
||||||
|
input.add_observer counter
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
result
|
||||||
|
end
|
||||||
|
module_function :call_dataflow
|
||||||
|
end
|
||||||
@ -0,0 +1,199 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/concern/obligation'
|
||||||
|
require 'concurrent/executor/immediate_executor'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# This file has circular require issues. It must be autoloaded here.
|
||||||
|
autoload :Options, 'concurrent/options'
|
||||||
|
|
||||||
|
# Lazy evaluation of a block yielding an immutable result. Useful for
|
||||||
|
# expensive operations that may never be needed. It may be non-blocking,
|
||||||
|
# supports the `Concern::Obligation` interface, and accepts the injection of
|
||||||
|
# custom executor upon which to execute the block. Processing of
|
||||||
|
# block will be deferred until the first time `#value` is called.
|
||||||
|
# At that time the caller can choose to return immediately and let
|
||||||
|
# the block execute asynchronously, block indefinitely, or block
|
||||||
|
# with a timeout.
|
||||||
|
#
|
||||||
|
# When a `Delay` is created its state is set to `pending`. The value and
|
||||||
|
# reason are both `nil`. The first time the `#value` method is called the
|
||||||
|
# enclosed opration will be run and the calling thread will block. Other
|
||||||
|
# threads attempting to call `#value` will block as well. Once the operation
|
||||||
|
# is complete the *value* will be set to the result of the operation or the
|
||||||
|
# *reason* will be set to the raised exception, as appropriate. All threads
|
||||||
|
# blocked on `#value` will return. Subsequent calls to `#value` will immediately
|
||||||
|
# return the cached value. The operation will only be run once. This means that
|
||||||
|
# any side effects created by the operation will only happen once as well.
|
||||||
|
#
|
||||||
|
# `Delay` includes the `Concurrent::Concern::Dereferenceable` mixin to support thread
|
||||||
|
# safety of the reference returned by `#value`.
|
||||||
|
#
|
||||||
|
# @!macro copy_options
|
||||||
|
#
|
||||||
|
# @!macro delay_note_regarding_blocking
|
||||||
|
# @note The default behavior of `Delay` is to block indefinitely when
|
||||||
|
# calling either `value` or `wait`, executing the delayed operation on
|
||||||
|
# the current thread. This makes the `timeout` value completely
|
||||||
|
# irrelevant. To enable non-blocking behavior, use the `executor`
|
||||||
|
# constructor option. This will cause the delayed operation to be
|
||||||
|
# execute on the given executor, allowing the call to timeout.
|
||||||
|
#
|
||||||
|
# @see Concurrent::Concern::Dereferenceable
|
||||||
|
class Delay < Synchronization::LockableObject
|
||||||
|
include Concern::Obligation
|
||||||
|
|
||||||
|
# NOTE: Because the global thread pools are lazy-loaded with these objects
|
||||||
|
# there is a performance hit every time we post a new task to one of these
|
||||||
|
# thread pools. Subsequently it is critical that `Delay` perform as fast
|
||||||
|
# as possible post-completion. This class has been highly optimized using
|
||||||
|
# the benchmark script `examples/lazy_and_delay.rb`. Do NOT attempt to
|
||||||
|
# DRY-up this class or perform other refactoring with running the
|
||||||
|
# benchmarks and ensuring that performance is not negatively impacted.
|
||||||
|
|
||||||
|
# Create a new `Delay` in the `:pending` state.
|
||||||
|
#
|
||||||
|
# @!macro executor_and_deref_options
|
||||||
|
#
|
||||||
|
# @yield the delayed operation to perform
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if no block is given
|
||||||
|
def initialize(opts = {}, &block)
|
||||||
|
raise ArgumentError.new('no block given') unless block_given?
|
||||||
|
super(&nil)
|
||||||
|
synchronize { ns_initialize(opts, &block) }
|
||||||
|
end
|
||||||
|
|
||||||
|
# Return the value this object represents after applying the options
|
||||||
|
# specified by the `#set_deref_options` method. If the delayed operation
|
||||||
|
# raised an exception this method will return nil. The exception object
|
||||||
|
# can be accessed via the `#reason` method.
|
||||||
|
#
|
||||||
|
# @param [Numeric] timeout the maximum number of seconds to wait
|
||||||
|
# @return [Object] the current value of the object
|
||||||
|
#
|
||||||
|
# @!macro delay_note_regarding_blocking
|
||||||
|
def value(timeout = nil)
|
||||||
|
if @executor # TODO (pitr 12-Sep-2015): broken unsafe read?
|
||||||
|
super
|
||||||
|
else
|
||||||
|
# this function has been optimized for performance and
|
||||||
|
# should not be modified without running new benchmarks
|
||||||
|
synchronize do
|
||||||
|
execute = @evaluation_started = true unless @evaluation_started
|
||||||
|
if execute
|
||||||
|
begin
|
||||||
|
set_state(true, @task.call, nil)
|
||||||
|
rescue => ex
|
||||||
|
set_state(false, nil, ex)
|
||||||
|
end
|
||||||
|
elsif incomplete?
|
||||||
|
raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay'
|
||||||
|
end
|
||||||
|
end
|
||||||
|
if @do_nothing_on_deref
|
||||||
|
@value
|
||||||
|
else
|
||||||
|
apply_deref_options(@value)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Return the value this object represents after applying the options
|
||||||
|
# specified by the `#set_deref_options` method. If the delayed operation
|
||||||
|
# raised an exception, this method will raise that exception (even when)
|
||||||
|
# the operation has already been executed).
|
||||||
|
#
|
||||||
|
# @param [Numeric] timeout the maximum number of seconds to wait
|
||||||
|
# @return [Object] the current value of the object
|
||||||
|
# @raise [Exception] when `#rejected?` raises `#reason`
|
||||||
|
#
|
||||||
|
# @!macro delay_note_regarding_blocking
|
||||||
|
def value!(timeout = nil)
|
||||||
|
if @executor
|
||||||
|
super
|
||||||
|
else
|
||||||
|
result = value
|
||||||
|
raise @reason if @reason
|
||||||
|
result
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Return the value this object represents after applying the options
|
||||||
|
# specified by the `#set_deref_options` method.
|
||||||
|
#
|
||||||
|
# @param [Integer] timeout (nil) the maximum number of seconds to wait for
|
||||||
|
# the value to be computed. When `nil` the caller will block indefinitely.
|
||||||
|
#
|
||||||
|
# @return [Object] self
|
||||||
|
#
|
||||||
|
# @!macro delay_note_regarding_blocking
|
||||||
|
def wait(timeout = nil)
|
||||||
|
if @executor
|
||||||
|
execute_task_once
|
||||||
|
super(timeout)
|
||||||
|
else
|
||||||
|
value
|
||||||
|
end
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# Reconfigures the block returning the value if still `#incomplete?`
|
||||||
|
#
|
||||||
|
# @yield the delayed operation to perform
|
||||||
|
# @return [true, false] if success
|
||||||
|
def reconfigure(&block)
|
||||||
|
synchronize do
|
||||||
|
raise ArgumentError.new('no block given') unless block_given?
|
||||||
|
unless @evaluation_started
|
||||||
|
@task = block
|
||||||
|
true
|
||||||
|
else
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
protected
|
||||||
|
|
||||||
|
def ns_initialize(opts, &block)
|
||||||
|
init_obligation
|
||||||
|
set_deref_options(opts)
|
||||||
|
@executor = opts[:executor]
|
||||||
|
|
||||||
|
@task = block
|
||||||
|
@state = :pending
|
||||||
|
@evaluation_started = false
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def execute_task_once # :nodoc:
|
||||||
|
# this function has been optimized for performance and
|
||||||
|
# should not be modified without running new benchmarks
|
||||||
|
execute = task = nil
|
||||||
|
synchronize do
|
||||||
|
execute = @evaluation_started = true unless @evaluation_started
|
||||||
|
task = @task
|
||||||
|
end
|
||||||
|
|
||||||
|
if execute
|
||||||
|
executor = Options.executor_from_options(executor: @executor)
|
||||||
|
executor.post do
|
||||||
|
begin
|
||||||
|
result = task.call
|
||||||
|
success = true
|
||||||
|
rescue => ex
|
||||||
|
reason = ex
|
||||||
|
end
|
||||||
|
synchronize do
|
||||||
|
set_state(success, result, reason)
|
||||||
|
event.set
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,74 @@
|
|||||||
|
module Concurrent
|
||||||
|
|
||||||
|
Error = Class.new(StandardError)
|
||||||
|
|
||||||
|
# Raised when errors occur during configuration.
|
||||||
|
ConfigurationError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when an asynchronous operation is cancelled before execution.
|
||||||
|
CancelledOperationError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when a lifecycle method (such as `stop`) is called in an improper
|
||||||
|
# sequence or when the object is in an inappropriate state.
|
||||||
|
LifecycleError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when an attempt is made to violate an immutability guarantee.
|
||||||
|
ImmutabilityError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when an operation is attempted which is not legal given the
|
||||||
|
# receiver's current state
|
||||||
|
IllegalOperationError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when an object's methods are called when it has not been
|
||||||
|
# properly initialized.
|
||||||
|
InitializationError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when an object with a start/stop lifecycle has been started an
|
||||||
|
# excessive number of times. Often used in conjunction with a restart
|
||||||
|
# policy or strategy.
|
||||||
|
MaxRestartFrequencyError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when an attempt is made to modify an immutable object
|
||||||
|
# (such as an `IVar`) after its final state has been set.
|
||||||
|
class MultipleAssignmentError < Error
|
||||||
|
attr_reader :inspection_data
|
||||||
|
|
||||||
|
def initialize(message = nil, inspection_data = nil)
|
||||||
|
@inspection_data = inspection_data
|
||||||
|
super message
|
||||||
|
end
|
||||||
|
|
||||||
|
def inspect
|
||||||
|
format '%s %s>', super[0..-2], @inspection_data.inspect
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# Raised by an `Executor` when it is unable to process a given task,
|
||||||
|
# possibly because of a reject policy or other internal error.
|
||||||
|
RejectedExecutionError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when any finite resource, such as a lock counter, exceeds its
|
||||||
|
# maximum limit/threshold.
|
||||||
|
ResourceLimitError = Class.new(Error)
|
||||||
|
|
||||||
|
# Raised when an operation times out.
|
||||||
|
TimeoutError = Class.new(Error)
|
||||||
|
|
||||||
|
# Aggregates multiple exceptions.
|
||||||
|
class MultipleErrors < Error
|
||||||
|
attr_reader :errors
|
||||||
|
|
||||||
|
def initialize(errors, message = "#{errors.size} errors")
|
||||||
|
@errors = errors
|
||||||
|
super [*message,
|
||||||
|
*errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1)
|
||||||
|
].join("\n")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
class ConcurrentUpdateError < ThreadError
|
||||||
|
# frozen pre-allocated backtrace to speed ConcurrentUpdateError
|
||||||
|
CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,353 @@
|
|||||||
|
require 'concurrent/constants'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/maybe'
|
||||||
|
require 'concurrent/atomic/atomic_reference'
|
||||||
|
require 'concurrent/atomic/count_down_latch'
|
||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/utility/monotonic_time'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro exchanger
|
||||||
|
#
|
||||||
|
# A synchronization point at which threads can pair and swap elements within
|
||||||
|
# pairs. Each thread presents some object on entry to the exchange method,
|
||||||
|
# matches with a partner thread, and receives its partner's object on return.
|
||||||
|
#
|
||||||
|
# @!macro thread_safe_variable_comparison
|
||||||
|
#
|
||||||
|
# This implementation is very simple, using only a single slot for each
|
||||||
|
# exchanger (unlike more advanced implementations which use an "arena").
|
||||||
|
# This approach will work perfectly fine when there are only a few threads
|
||||||
|
# accessing a single `Exchanger`. Beyond a handful of threads the performance
|
||||||
|
# will degrade rapidly due to contention on the single slot, but the algorithm
|
||||||
|
# will remain correct.
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger
|
||||||
|
# @example
|
||||||
|
#
|
||||||
|
# exchanger = Concurrent::Exchanger.new
|
||||||
|
#
|
||||||
|
# threads = [
|
||||||
|
# Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar"
|
||||||
|
# Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo"
|
||||||
|
# ]
|
||||||
|
# threads.each {|t| t.join(2) }
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
class AbstractExchanger < Synchronization::Object

  # Sentinel object returned by #do_exchange to signal a timed-out exchange.
  # @!visibility private
  CANCEL = ::Object.new
  private_constant :CANCEL

  def initialize
    super
  end

  # @!macro exchanger_method_do_exchange
  #
  #   Waits for another thread to arrive at this exchange point (unless the
  #   current thread is interrupted), and then transfers the given object to
  #   it, receiving its object in return. The timeout value indicates the
  #   approximate number of seconds the method should block while waiting
  #   for the exchange. When the timeout value is `nil` the method will
  #   block indefinitely.
  #
  #   @param [Object] value the value to exchange with another thread
  #   @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely
  #
  # @!macro exchanger_method_exchange
  #
  #   In some edge cases when a `timeout` is given a return value of `nil` may be
  #   ambiguous. Specifically, if `nil` is a valid value in the exchange it will
  #   be impossible to tell whether `nil` is the actual return value or if it
  #   signifies timeout. When `nil` is a valid value in the exchange consider
  #   using {#exchange!} or {#try_exchange} instead.
  #
  # @return [Object] the value exchanged by the other thread or `nil` on timeout
  def exchange(value, timeout = nil)
    exchanged = do_exchange(value, timeout)
    exchanged == CANCEL ? nil : exchanged
  end

  # @!macro exchanger_method_do_exchange
  # @!macro exchanger_method_exchange_bang
  #
  #   On timeout a {Concurrent::TimeoutError} exception will be raised.
  #
  # @return [Object] the value exchanged by the other thread
  # @raise [Concurrent::TimeoutError] on timeout
  def exchange!(value, timeout = nil)
    exchanged = do_exchange(value, timeout)
    raise Concurrent::TimeoutError if exchanged == CANCEL

    exchanged
  end

  # @!macro exchanger_method_do_exchange
  # @!macro exchanger_method_try_exchange
  #
  #   The return value will be a {Concurrent::Maybe} set to `Just` on success or
  #   `Nothing` on timeout.
  #
  # @return [Concurrent::Maybe] on success a `Just` maybe will be returned with
  #   the item exchanged by the other thread as `#value`; on timeout a
  #   `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason`
  #
  # @example
  #
  #   exchanger = Concurrent::Exchanger.new
  #
  #   result = exchanger.exchange(:foo, 0.5)
  #
  #   if result.just?
  #     puts result.value #=> :bar
  #   else
  #     puts 'timeout'
  #   end
  def try_exchange(value, timeout = nil)
    exchanged = do_exchange(value, timeout)
    if exchanged == CANCEL
      Concurrent::Maybe.nothing(Concurrent::TimeoutError)
    else
      Concurrent::Maybe.just(exchanged)
    end
  end

  private

  # @!macro exchanger_method_do_exchange
  #
  # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout
  def do_exchange(value, timeout)
    raise NotImplementedError
  end
end
|
||||||
|
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
# @!visibility private
|
||||||
|
class RubyExchanger < AbstractExchanger
  # A simplified version of java.util.concurrent.Exchanger written by
  # Doug Lea, Bill Scherer, and Michael Scott with assistance from members
  # of JCP JSR-166 Expert Group and released to the public domain. It does
  # not include the arena or the multi-processor spin loops.
  # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java

  safe_initialization!

  # Holds one party's offered item, an atomic "hole" (the node's value) that
  # the partner thread fills in, and a latch used to wake the sleeping occupier.
  class Node < Concurrent::Synchronization::Object
    attr_atomic :value
    safe_initialization!

    def initialize(item)
      super()
      @Item = item
      @Latch = Concurrent::CountDownLatch.new
      self.value = nil
    end

    def latch
      @Latch
    end

    def item
      @Item
    end
  end
  private_constant :Node

  def initialize
    super
  end

  private

  # The single shared slot through which an occupier and a fulfiller pair up.
  attr_atomic(:slot)

  # @!macro exchanger_method_do_exchange
  #
  # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout
  def do_exchange(value, timeout)

    # ALGORITHM
    #
    # From the original Java version:
    #
    # > The basic idea is to maintain a "slot", which is a reference to
    # > a Node containing both an Item to offer and a "hole" waiting to
    # > get filled in. If an incoming "occupying" thread sees that the
    # > slot is null, it CAS'es (compareAndSets) a Node there and waits
    # > for another to invoke exchange. That second "fulfilling" thread
    # > sees that the slot is non-null, and so CASes it back to null,
    # > also exchanging items by CASing the hole, plus waking up the
    # > occupying thread if it is blocked. In each case CAS'es may
    # > fail because a slot at first appears non-null but is null upon
    # > CAS, or vice-versa. So threads may need to retry these
    # > actions.
    #
    # This version:
    #
    # An exchange occurs between an "occupier" thread and a "fulfiller" thread.
    # The "slot" is used to setup this interaction. The first thread in the
    # exchange puts itself into the slot (occupies) and waits for a fulfiller.
    # The second thread removes the occupier from the slot and attempts to
    # perform the exchange. Removing the occupier also frees the slot for
    # another occupier/fulfiller pair.
    #
    # Because the occupier and the fulfiller are operating independently and
    # because there may be contention with other threads, any failed operation
    # indicates contention. Both the occupier and the fulfiller operate within
    # spin loops. Any failed actions along the happy path will cause the thread
    # to repeat the loop and try again.
    #
    # When a timeout value is given the thread must be cognizant of time spent
    # in the spin loop. The remaining time is checked every loop. When the time
    # runs out the thread will exit.
    #
    # A "node" is the data structure used to perform the exchange. Only the
    # occupier's node is necessary. It's the node used for the exchange.
    # Each node has an "item," a "hole" (self), and a "latch." The item is the
    # node's initial value. It never changes. It's what the fulfiller returns on
    # success. The occupier's hole is where the fulfiller put its item. It's the
    # item that the occupier returns on success. The latch is used for synchronization.
    # Because a thread may act as either an occupier or fulfiller (or possibly
    # both in periods of high contention) every thread creates a node when
    # the exchange method is first called.
    #
    # The following steps occur within the spin loop. If any actions fail
    # the thread will loop and try again, so long as there is time remaining.
    # If time runs out the thread will return CANCEL.
    #
    # Check the slot for an occupier:
    #
    # * If the slot is empty try to occupy
    # * If the slot is full try to fulfill
    #
    # Attempt to occupy:
    #
    # * Attempt to CAS myself into the slot
    # * Go to sleep and wait to be woken by a fulfiller
    # * If the sleep is successful then the fulfiller completed its happy path
    #   - Return the value from my hole (the value given by the fulfiller)
    # * When the sleep fails (time ran out) attempt to cancel the operation
    #   - Attempt to CAS myself out of the hole
    #   - If successful there is no contention
    #     - Return CANCEL
    #   - On failure, I am competing with a fulfiller
    #     - Attempt to CAS my hole to CANCEL
    #     - On success
    #       - Let the fulfiller deal with my cancel
    #       - Return CANCEL
    #     - On failure the fulfiller has completed its happy path
    #       - Return the value from my hole (the fulfiller's value)
    #
    # Attempt to fulfill:
    #
    # * Attempt to CAS the occupier out of the slot
    #   - On failure loop again
    # * Attempt to CAS my item into the occupier's hole
    #   - On failure the occupier is trying to cancel
    #     - Loop again
    #   - On success we are on the happy path
    #     - Wake the sleeping occupier
    #     - Return the occupier's item

    value = NULL if value.nil? # The sentinel allows nil to be a valid value
    me = Node.new(value) # create my node in case I need to occupy
    end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up

    result = loop do
      other = slot
      if other && compare_and_set_slot(other, nil)
        # try to fulfill
        if other.compare_and_set_value(nil, value)
          # happy path
          other.latch.count_down
          break other.item
        end
      elsif other.nil? && compare_and_set_slot(nil, me)
        # try to occupy
        timeout = end_at - Concurrent.monotonic_time if timeout
        if me.latch.wait(timeout)
          # happy path
          break me.value
        else
          # attempt to remove myself from the slot
          if compare_and_set_slot(me, nil)
            break CANCEL
          elsif !me.compare_and_set_value(nil, CANCEL)
            # I've failed to block the fulfiller
            break me.value
          end
        end
      end
      break CANCEL if timeout && Concurrent.monotonic_time >= end_at
    end

    result == NULL ? nil : result
  end
end
|
||||||
|
|
||||||
|
if Concurrent.on_jruby?
  require 'concurrent/utility/native_extension_loader'

  # Thin wrapper delegating to the JVM's battle-tested exchanger on JRuby.
  #
  # @!macro internal_implementation_note
  # @!visibility private
  class JavaExchanger < AbstractExchanger

    def initialize
      @exchanger = java.util.concurrent.Exchanger.new
    end

    private

    # @!macro exchanger_method_do_exchange
    #
    # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout
    def do_exchange(value, timeout)
      result = nil
      if timeout.nil?
        # Block indefinitely until a partner thread arrives.
        Synchronization::JRuby.sleep_interruptibly do
          result = @exchanger.exchange(value)
        end
      else
        # Java's API takes an integral duration; convert seconds to milliseconds.
        Synchronization::JRuby.sleep_interruptibly do
          result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
        end
      end
      result
    rescue java.util.concurrent.TimeoutException
      # Translate the Java timeout exception into the CANCEL sentinel expected
      # by AbstractExchanger's public methods.
      CANCEL
    end
  end
end
|
||||||
|
|
||||||
|
# @!visibility private
# @!macro internal_implementation_note
#
# Select the JVM-backed implementation on JRuby, the pure-Ruby one elsewhere.
# Only the chosen branch is evaluated, so JavaExchanger need not exist on MRI.
ExchangerImplementation = if Concurrent.on_jruby?
                            JavaExchanger
                          else
                            RubyExchanger
                          end
private_constant :ExchangerImplementation

# @!macro exchanger
class Exchanger < ExchangerImplementation

  # @!method initialize
  #   Creates exchanger instance

  # @!method exchange(value, timeout = nil)
  #   @!macro exchanger_method_do_exchange
  #   @!macro exchanger_method_exchange

  # @!method exchange!(value, timeout = nil)
  #   @!macro exchanger_method_do_exchange
  #   @!macro exchanger_method_exchange_bang

  # @!method try_exchange(value, timeout = nil)
  #   @!macro exchanger_method_do_exchange
  #   @!macro exchanger_method_try_exchange
end
|
||||||
|
end
|
||||||
@ -0,0 +1,131 @@
|
|||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/concern/deprecation'
|
||||||
|
require 'concurrent/executor/executor_service'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
# @!visibility private
|
||||||
|
class AbstractExecutorService < Synchronization::LockableObject
  include ExecutorService
  include Concern::Deprecation

  # The set of possible fallback policies that may be set at thread pool creation.
  FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze

  # @!macro executor_service_attr_reader_fallback_policy
  attr_reader :fallback_policy

  # Optional human-readable name, shown by #to_s when set.
  attr_reader :name

  # Create a new thread pool.
  def initialize(opts = {}, &block)
    # Pass no block up to the superclass; the block is consumed by ns_initialize.
    super(&nil)
    synchronize do
      @auto_terminate = opts.fetch(:auto_terminate, true)
      @name = opts.fetch(:name) if opts.key?(:name)
      ns_initialize(opts, &block)
    end
  end

  def to_s
    # Splice the name just before the closing '>' of the default representation.
    name ? "#{super[0..-2]} name: #{name}>" : super
  end

  # @!macro executor_service_method_shutdown
  def shutdown
    raise NotImplementedError
  end

  # @!macro executor_service_method_kill
  def kill
    raise NotImplementedError
  end

  # @!macro executor_service_method_wait_for_termination
  def wait_for_termination(timeout = nil)
    raise NotImplementedError
  end

  # @!macro executor_service_method_running_question
  def running?
    synchronize { ns_running? }
  end

  # @!macro executor_service_method_shuttingdown_question
  def shuttingdown?
    synchronize { ns_shuttingdown? }
  end

  # @!macro executor_service_method_shutdown_question
  def shutdown?
    synchronize { ns_shutdown? }
  end

  # @!macro executor_service_method_auto_terminate_question
  def auto_terminate?
    synchronize { @auto_terminate }
  end

  # @!macro executor_service_method_auto_terminate_setter
  def auto_terminate=(value)
    deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized."
  end

  private

  # Returns an action which executes the `fallback_policy` once the queue
  # size reaches `max_queue`. The reason for the indirection of an action
  # is so that the work can be deferred outside of synchronization.
  #
  # @param [Array] args the arguments to the task which is being handled.
  #
  # @!visibility private
  def fallback_action(*args)
    case fallback_policy
    when :abort
      lambda { raise RejectedExecutionError }
    when :discard
      lambda { false }
    when :caller_runs
      # Run the rejected task synchronously on the submitting thread.
      lambda {
        begin
          yield(*args)
        rescue => ex
          # let it fail
          log DEBUG, ex
        end
        true
      }
    else
      lambda { fail "Unknown fallback policy #{fallback_policy}" }
    end
  end

  # Subclass hook: enqueue/execute a task while the lock is held.
  def ns_execute(*args, &task)
    raise NotImplementedError
  end

  # @!macro executor_service_method_ns_shutdown_execution
  #
  # Callback method called when an orderly shutdown has completed.
  # The default behavior is to signal all waiting threads.
  def ns_shutdown_execution
    # do nothing
  end

  # @!macro executor_service_method_ns_kill_execution
  #
  # Callback method called when the executor has been killed.
  # The default behavior is to do nothing.
  def ns_kill_execution
    # do nothing
  end

  # Lock-free read of the auto-terminate flag; callers must already hold the lock.
  def ns_auto_terminate?
    @auto_terminate
  end

end
|
||||||
|
end
|
||||||
@ -0,0 +1,62 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/executor/thread_pool_executor'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A thread pool that dynamically grows and shrinks to fit the current workload.
|
||||||
|
# New threads are created as needed, existing threads are reused, and threads
|
||||||
|
# that remain idle for too long are killed and removed from the pool. These
|
||||||
|
# pools are particularly suited to applications that perform a high volume of
|
||||||
|
# short-lived tasks.
|
||||||
|
#
|
||||||
|
# On creation a `CachedThreadPool` has zero running threads. New threads are
|
||||||
|
# created on the pool as new operations are `#post`. The size of the pool
|
||||||
|
# will grow until `#max_length` threads are in the pool or until the number
|
||||||
|
# of threads exceeds the number of running and pending operations. When a new
|
||||||
|
# operation is post to the pool the first available idle thread will be tasked
|
||||||
|
# with the new operation.
|
||||||
|
#
|
||||||
|
# Should a thread crash for any reason the thread will immediately be removed
|
||||||
|
# from the pool. Similarly, threads which remain idle for an extended period
|
||||||
|
# of time will be killed and reclaimed. Thus these thread pools are very
|
||||||
|
# efficient at reclaiming unused resources.
|
||||||
|
#
|
||||||
|
# The API and behavior of this class are based on Java's `CachedThreadPool`
|
||||||
|
#
|
||||||
|
# @!macro thread_pool_options
|
||||||
|
class CachedThreadPool < ThreadPoolExecutor

  # @!macro cached_thread_pool_method_initialize
  #
  #   Create a new thread pool.
  #
  #   @param [Hash] opts the options defining pool behavior.
  #   @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
  #
  #   @raise [ArgumentError] if `fallback_policy` is not a known policy
  #
  #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool--
  def initialize(opts = {})
    # Callers may tune :idletime; min/max threads and queue size are fixed
    # to match the semantics of Java's cached thread pool.
    defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT }
    overrides = { min_threads: 0,
                  max_threads: DEFAULT_MAX_POOL_SIZE,
                  max_queue: DEFAULT_MAX_QUEUE_SIZE }
    super(defaults.merge(opts).merge(overrides))
  end

  private

  # @!macro cached_thread_pool_method_initialize
  # @!visibility private
  def ns_initialize(opts)
    super(opts)
    # On JRuby, replace the Ruby executor with the native JVM cached pool.
    if Concurrent.on_jruby?
      @max_queue = 0
      @executor = java.util.concurrent.Executors.newCachedThreadPool(
        DaemonThreadFactory.new(ns_auto_terminate?))
      @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
      @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
    end
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,185 @@
|
|||||||
|
require 'concurrent/concern/logging'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro executor_service_method_post
|
||||||
|
#
|
||||||
|
# Submit a task to the executor for asynchronous processing.
|
||||||
|
#
|
||||||
|
# @param [Array] args zero or more arguments to be passed to the task
|
||||||
|
#
|
||||||
|
# @yield the asynchronous task to perform
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` if the task is queued, `false` if the executor
|
||||||
|
# is not running
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if no task is given
|
||||||
|
|
||||||
|
# @!macro executor_service_method_left_shift
|
||||||
|
#
|
||||||
|
# Submit a task to the executor for asynchronous processing.
|
||||||
|
#
|
||||||
|
# @param [Proc] task the asynchronous task to perform
|
||||||
|
#
|
||||||
|
# @return [self] returns itself
|
||||||
|
|
||||||
|
# @!macro executor_service_method_can_overflow_question
|
||||||
|
#
|
||||||
|
# Does the task queue have a maximum size?
|
||||||
|
#
|
||||||
|
# @return [Boolean] True if the task queue has a maximum size else false.
|
||||||
|
|
||||||
|
# @!macro executor_service_method_serialized_question
|
||||||
|
#
|
||||||
|
# Does this executor guarantee serialization of its operations?
|
||||||
|
#
|
||||||
|
# @return [Boolean] True if the executor guarantees that all operations
|
||||||
|
# will be post in the order they are received and no two operations may
|
||||||
|
# occur simultaneously. Else false.
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro executor_service_public_api
|
||||||
|
#
|
||||||
|
# @!method post(*args, &task)
|
||||||
|
# @!macro executor_service_method_post
|
||||||
|
#
|
||||||
|
# @!method <<(task)
|
||||||
|
# @!macro executor_service_method_left_shift
|
||||||
|
#
|
||||||
|
# @!method can_overflow?
|
||||||
|
# @!macro executor_service_method_can_overflow_question
|
||||||
|
#
|
||||||
|
# @!method serialized?
|
||||||
|
# @!macro executor_service_method_serialized_question
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro executor_service_attr_reader_fallback_policy
|
||||||
|
# @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`.
|
||||||
|
|
||||||
|
# @!macro executor_service_method_shutdown
|
||||||
|
#
|
||||||
|
# Begin an orderly shutdown. Tasks already in the queue will be executed,
|
||||||
|
# but no new tasks will be accepted. Has no additional effect if the
|
||||||
|
# thread pool is not running.
|
||||||
|
|
||||||
|
# @!macro executor_service_method_kill
|
||||||
|
#
|
||||||
|
# Begin an immediate shutdown. In-progress tasks will be allowed to
|
||||||
|
# complete but enqueued tasks will be dismissed and no new tasks
|
||||||
|
# will be accepted. Has no additional effect if the thread pool is
|
||||||
|
# not running.
|
||||||
|
|
||||||
|
# @!macro executor_service_method_wait_for_termination
|
||||||
|
#
|
||||||
|
# Block until executor shutdown is complete or until `timeout` seconds have
|
||||||
|
# passed.
|
||||||
|
#
|
||||||
|
# @note Does not initiate shutdown or termination. Either `shutdown` or `kill`
|
||||||
|
# must be called before this method (or on another thread).
|
||||||
|
#
|
||||||
|
# @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` if shutdown complete or false on `timeout`
|
||||||
|
|
||||||
|
# @!macro executor_service_method_running_question
|
||||||
|
#
|
||||||
|
# Is the executor running?
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` when running, `false` when shutting down or shutdown
|
||||||
|
|
||||||
|
# @!macro executor_service_method_shuttingdown_question
|
||||||
|
#
|
||||||
|
# Is the executor shuttingdown?
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` when not running and not shutdown, else `false`
|
||||||
|
|
||||||
|
# @!macro executor_service_method_shutdown_question
|
||||||
|
#
|
||||||
|
# Is the executor shutdown?
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` when shutdown, `false` when shutting down or running
|
||||||
|
|
||||||
|
# @!macro executor_service_method_auto_terminate_question
|
||||||
|
#
|
||||||
|
# Does the executor auto-terminate when the application exits?
|
||||||
|
#
|
||||||
|
# @return [Boolean] `true` when auto-termination is enabled else `false`.
|
||||||
|
|
||||||
|
# @!macro executor_service_method_auto_terminate_setter
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# Set the auto-terminate behavior for this executor.
|
||||||
|
# @deprecated Has no effect
|
||||||
|
# @param [Boolean] value The new auto-terminate value to set for this executor.
|
||||||
|
# @return [Boolean] `true` when auto-termination is enabled else `false`.
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
#
|
||||||
|
# @!macro executor_service_public_api
|
||||||
|
#
|
||||||
|
# @!attribute [r] fallback_policy
|
||||||
|
# @!macro executor_service_attr_reader_fallback_policy
|
||||||
|
#
|
||||||
|
# @!method shutdown
|
||||||
|
# @!macro executor_service_method_shutdown
|
||||||
|
#
|
||||||
|
# @!method kill
|
||||||
|
# @!macro executor_service_method_kill
|
||||||
|
#
|
||||||
|
# @!method wait_for_termination(timeout = nil)
|
||||||
|
# @!macro executor_service_method_wait_for_termination
|
||||||
|
#
|
||||||
|
# @!method running?
|
||||||
|
# @!macro executor_service_method_running_question
|
||||||
|
#
|
||||||
|
# @!method shuttingdown?
|
||||||
|
# @!macro executor_service_method_shuttingdown_question
|
||||||
|
#
|
||||||
|
# @!method shutdown?
|
||||||
|
# @!macro executor_service_method_shutdown_question
|
||||||
|
#
|
||||||
|
# @!method auto_terminate?
|
||||||
|
# @!macro executor_service_method_auto_terminate_question
|
||||||
|
#
|
||||||
|
# @!method auto_terminate=(value)
|
||||||
|
# @!macro executor_service_method_auto_terminate_setter
|
||||||
|
|
||||||
|
###################################################################
|
||||||
|
|
||||||
|
# @!macro executor_service_public_api
|
||||||
|
# @!visibility private
|
||||||
|
module ExecutorService
  include Concern::Logging

  # @!macro executor_service_method_post
  #
  # Subclass hook: implementations must enqueue the task for asynchronous execution.
  def post(*args, &task)
    raise NotImplementedError
  end

  # @!macro executor_service_method_left_shift
  #
  # Convenience form of #post that enables chaining: `pool << job1 << job2`.
  def <<(task)
    post(&task)
    self
  end

  # @!macro executor_service_method_can_overflow_question
  #
  # @note Always returns `false`
  def can_overflow?
    false
  end

  # @!macro executor_service_method_serialized_question
  #
  # @note Always returns `false`
  def serialized?
    false
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,224 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/executor/thread_pool_executor'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_max_pool_size
|
||||||
|
# Default maximum number of threads that will be created in the pool.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_min_pool_size
|
||||||
|
# Default minimum number of threads that will be retained in the pool.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_max_queue_size
|
||||||
|
# Default maximum number of tasks that may be added to the task queue.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_thread_timeout
|
||||||
|
# Default maximum number of seconds a thread in the pool may remain idle
|
||||||
|
# before being reclaimed.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_synchronous
|
||||||
|
# Default value of the :synchronous option.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_length
|
||||||
|
# The maximum number of threads that may be created in the pool.
|
||||||
|
# @return [Integer] The maximum number of threads that may be created in the pool.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_min_length
|
||||||
|
# The minimum number of threads that may be retained in the pool.
|
||||||
|
# @return [Integer] The minimum number of threads that may be retained in the pool.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_largest_length
|
||||||
|
# The largest number of threads that have been created in the pool since construction.
|
||||||
|
# @return [Integer] The largest number of threads that have been created in the pool since construction.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
|
||||||
|
# The number of tasks that have been scheduled for execution on the pool since construction.
|
||||||
|
# @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_completed_task_count
|
||||||
|
# The number of tasks that have been completed by the pool since construction.
|
||||||
|
# @return [Integer] The number of tasks that have been completed by the pool since construction.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_method_active_count
|
||||||
|
# The number of threads that are actively executing tasks.
|
||||||
|
# @return [Integer] The number of threads that are actively executing tasks.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_idletime
|
||||||
|
# The number of seconds that a thread may be idle before being reclaimed.
|
||||||
|
# @return [Integer] The number of seconds that a thread may be idle before being reclaimed.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_synchronous
|
||||||
|
# Whether or not a value of 0 for :max_queue option means the queue must perform direct hand-off or rather unbounded queue.
|
||||||
|
# @return [true, false]
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_queue
|
||||||
|
# The maximum number of tasks that may be waiting in the work queue at any one time.
|
||||||
|
# When the queue size reaches `max_queue` subsequent tasks will be rejected in
|
||||||
|
# accordance with the configured `fallback_policy`.
|
||||||
|
#
|
||||||
|
# @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time.
|
||||||
|
# When the queue size reaches `max_queue` subsequent tasks will be rejected in
|
||||||
|
# accordance with the configured `fallback_policy`.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_length
|
||||||
|
# The number of threads currently in the pool.
|
||||||
|
# @return [Integer] The number of threads currently in the pool.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_queue_length
|
||||||
|
# The number of tasks in the queue awaiting execution.
|
||||||
|
# @return [Integer] The number of tasks in the queue awaiting execution.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_remaining_capacity
|
||||||
|
# Number of tasks that may be enqueued before reaching `max_queue` and rejecting
|
||||||
|
# new tasks. A value of -1 indicates that the queue may grow without bound.
|
||||||
|
#
|
||||||
|
# @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting
|
||||||
|
# new tasks. A value of -1 indicates that the queue may grow without bound.
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_method_prune_pool
|
||||||
|
# Prune the thread pool of unneeded threads
|
||||||
|
#
|
||||||
|
# What is being pruned is controlled by the min_threads and idletime
|
||||||
|
# parameters passed at pool creation time
|
||||||
|
#
|
||||||
|
# This is a no-op on some pool implementation (e.g. the Java one). The Ruby
|
||||||
|
# pool will auto-prune each time a new job is posted. You will need to call
|
||||||
|
# this method explicitely in case your application post jobs in bursts (a
|
||||||
|
# lot of jobs and then nothing for long periods)
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_public_api
|
||||||
|
#
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
#
|
||||||
|
# @!attribute [r] max_length
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_length
|
||||||
|
#
|
||||||
|
# @!attribute [r] min_length
|
||||||
|
# @!macro thread_pool_executor_attr_reader_min_length
|
||||||
|
#
|
||||||
|
# @!attribute [r] largest_length
|
||||||
|
# @!macro thread_pool_executor_attr_reader_largest_length
|
||||||
|
#
|
||||||
|
# @!attribute [r] scheduled_task_count
|
||||||
|
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
|
||||||
|
#
|
||||||
|
# @!attribute [r] completed_task_count
|
||||||
|
# @!macro thread_pool_executor_attr_reader_completed_task_count
|
||||||
|
#
|
||||||
|
# @!attribute [r] idletime
|
||||||
|
# @!macro thread_pool_executor_attr_reader_idletime
|
||||||
|
#
|
||||||
|
# @!attribute [r] max_queue
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_queue
|
||||||
|
#
|
||||||
|
# @!attribute [r] length
|
||||||
|
# @!macro thread_pool_executor_attr_reader_length
|
||||||
|
#
|
||||||
|
# @!attribute [r] queue_length
|
||||||
|
# @!macro thread_pool_executor_attr_reader_queue_length
|
||||||
|
#
|
||||||
|
# @!attribute [r] remaining_capacity
|
||||||
|
# @!macro thread_pool_executor_attr_reader_remaining_capacity
|
||||||
|
#
|
||||||
|
# @!method can_overflow?
|
||||||
|
# @!macro executor_service_method_can_overflow_question
|
||||||
|
#
|
||||||
|
# @!method prune_pool
|
||||||
|
# @!macro thread_pool_executor_method_prune_pool
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# @!macro thread_pool_options
|
||||||
|
#
|
||||||
|
# **Thread Pool Options**
|
||||||
|
#
|
||||||
|
# Thread pools support several configuration options:
|
||||||
|
#
|
||||||
|
# * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
|
||||||
|
# * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and
|
||||||
|
# a `<name>-worker-<id>` name is given to its threads if supported by used Ruby
|
||||||
|
# implementation. `<id>` is uniq for each thread.
|
||||||
|
# * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
|
||||||
|
# any one time. When the queue size reaches `max_queue` and no new threads can be created,
|
||||||
|
# subsequent tasks will be rejected in accordance with the configured `fallback_policy`.
|
||||||
|
# * `auto_terminate`: When true (default), the threads started will be marked as daemon.
|
||||||
|
# * `fallback_policy`: The policy defining how rejected tasks are handled.
|
||||||
|
#
|
||||||
|
# Three fallback policies are supported:
|
||||||
|
#
|
||||||
|
# * `:abort`: Raise a `RejectedExecutionError` exception and discard the task.
|
||||||
|
# * `:discard`: Discard the task and return false.
|
||||||
|
# * `:caller_runs`: Execute the task on the calling thread.
|
||||||
|
#
|
||||||
|
# **Shutting Down Thread Pools**
|
||||||
|
#
|
||||||
|
# Killing a thread pool while tasks are still being processed, either by calling
|
||||||
|
# the `#kill` method or at application exit, will have unpredictable results. There
|
||||||
|
# is no way for the thread pool to know what resources are being used by the
|
||||||
|
# in-progress tasks. When those tasks are killed the impact on those resources
|
||||||
|
# cannot be predicted. The *best* practice is to explicitly shutdown all thread
|
||||||
|
# pools using the provided methods:
|
||||||
|
#
|
||||||
|
# * Call `#shutdown` to initiate an orderly termination of all in-progress tasks
|
||||||
|
# * Call `#wait_for_termination` with an appropriate timeout interval an allow
|
||||||
|
# the orderly shutdown to complete
|
||||||
|
# * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time
|
||||||
|
#
|
||||||
|
# On some runtime platforms (most notably the JVM) the application will not
|
||||||
|
# exit until all thread pools have been shutdown. To prevent applications from
|
||||||
|
# "hanging" on exit, all threads can be marked as daemon according to the
|
||||||
|
# `:auto_terminate` option.
|
||||||
|
#
|
||||||
|
# ```ruby
|
||||||
|
# pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon
|
||||||
|
# pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# @note Failure to properly shutdown a thread pool can lead to unpredictable results.
|
||||||
|
# Please read *Shutting Down Thread Pools* for more information.
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class
|
||||||
|
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface
|
||||||
|
# @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean-
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# @!macro fixed_thread_pool
|
||||||
|
#
|
||||||
|
# A thread pool that reuses a fixed number of threads operating off an unbounded queue.
|
||||||
|
# At any point, at most `num_threads` will be active processing tasks. When all threads are busy new
|
||||||
|
# tasks `#post` to the thread pool are enqueued until a thread becomes available.
|
||||||
|
# Should a thread crash for any reason the thread will immediately be removed
|
||||||
|
# from the pool and replaced.
|
||||||
|
#
|
||||||
|
# The API and behavior of this class are based on Java's `FixedThreadPool`
|
||||||
|
#
|
||||||
|
# @!macro thread_pool_options
|
||||||
|
class FixedThreadPool < ThreadPoolExecutor
|
||||||
|
|
||||||
|
# @!macro fixed_thread_pool_method_initialize
|
||||||
|
#
|
||||||
|
# Create a new thread pool.
|
||||||
|
#
|
||||||
|
# @param [Integer] num_threads the number of threads to allocate
|
||||||
|
# @param [Hash] opts the options defining pool behavior.
|
||||||
|
# @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if `num_threads` is less than or equal to zero
|
||||||
|
# @raise [ArgumentError] if `fallback_policy` is not a known policy
|
||||||
|
#
|
||||||
|
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int-
|
||||||
|
def initialize(num_threads, opts = {})
|
||||||
|
raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1
|
||||||
|
defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE,
|
||||||
|
idletime: DEFAULT_THREAD_IDLETIMEOUT }
|
||||||
|
overrides = { min_threads: num_threads,
|
||||||
|
max_threads: num_threads }
|
||||||
|
super(defaults.merge(opts).merge(overrides))
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,66 @@
|
|||||||
|
require 'concurrent/atomic/event'
|
||||||
|
require 'concurrent/executor/abstract_executor_service'
|
||||||
|
require 'concurrent/executor/serial_executor_service'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# An executor service which runs all operations on the current thread,
|
||||||
|
# blocking as necessary. Operations are performed in the order they are
|
||||||
|
# received and no two operations can be performed simultaneously.
|
||||||
|
#
|
||||||
|
# This executor service exists mainly for testing an debugging. When used
|
||||||
|
# it immediately runs every `#post` operation on the current thread, blocking
|
||||||
|
# that thread until the operation is complete. This can be very beneficial
|
||||||
|
# during testing because it makes all operations deterministic.
|
||||||
|
#
|
||||||
|
# @note Intended for use primarily in testing and debugging.
|
||||||
|
class ImmediateExecutor < AbstractExecutorService
|
||||||
|
include SerialExecutorService
|
||||||
|
|
||||||
|
# Creates a new executor
|
||||||
|
def initialize
|
||||||
|
@stopped = Concurrent::Event.new
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_post
|
||||||
|
def post(*args, &task)
|
||||||
|
raise ArgumentError.new('no block given') unless block_given?
|
||||||
|
return false unless running?
|
||||||
|
task.call(*args)
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_left_shift
|
||||||
|
def <<(task)
|
||||||
|
post(&task)
|
||||||
|
self
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_running_question
|
||||||
|
def running?
|
||||||
|
! shutdown?
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_shuttingdown_question
|
||||||
|
def shuttingdown?
|
||||||
|
false
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_shutdown_question
|
||||||
|
def shutdown?
|
||||||
|
@stopped.set?
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_shutdown
|
||||||
|
def shutdown
|
||||||
|
@stopped.set
|
||||||
|
true
|
||||||
|
end
|
||||||
|
alias_method :kill, :shutdown
|
||||||
|
|
||||||
|
# @!macro executor_service_method_wait_for_termination
|
||||||
|
def wait_for_termination(timeout = nil)
|
||||||
|
@stopped.wait(timeout)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,44 @@
|
|||||||
|
require 'concurrent/executor/immediate_executor'
|
||||||
|
require 'concurrent/executor/simple_executor_service'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
# An executor service which runs all operations on a new thread, blocking
|
||||||
|
# until it completes. Operations are performed in the order they are received
|
||||||
|
# and no two operations can be performed simultaneously.
|
||||||
|
#
|
||||||
|
# This executor service exists mainly for testing an debugging. When used it
|
||||||
|
# immediately runs every `#post` operation on a new thread, blocking the
|
||||||
|
# current thread until the operation is complete. This is similar to how the
|
||||||
|
# ImmediateExecutor works, but the operation has the full stack of the new
|
||||||
|
# thread at its disposal. This can be helpful when the operations will spawn
|
||||||
|
# more operations on the same executor and so on - such a situation might
|
||||||
|
# overflow the single stack in case of an ImmediateExecutor, which is
|
||||||
|
# inconsistent with how it would behave for a threaded executor.
|
||||||
|
#
|
||||||
|
# @note Intended for use primarily in testing and debugging.
|
||||||
|
class IndirectImmediateExecutor < ImmediateExecutor
|
||||||
|
# Creates a new executor
|
||||||
|
def initialize
|
||||||
|
super
|
||||||
|
@internal_executor = SimpleExecutorService.new
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_post
|
||||||
|
def post(*args, &task)
|
||||||
|
raise ArgumentError.new("no block given") unless block_given?
|
||||||
|
return false unless running?
|
||||||
|
|
||||||
|
event = Concurrent::Event.new
|
||||||
|
@internal_executor.post do
|
||||||
|
begin
|
||||||
|
task.call(*args)
|
||||||
|
ensure
|
||||||
|
event.set
|
||||||
|
end
|
||||||
|
end
|
||||||
|
event.wait
|
||||||
|
|
||||||
|
true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,100 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
|
||||||
|
if Concurrent.on_jruby?
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/executor/abstract_executor_service'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
# @!visibility private
|
||||||
|
class JavaExecutorService < AbstractExecutorService
|
||||||
|
java_import 'java.lang.Runnable'
|
||||||
|
|
||||||
|
FALLBACK_POLICY_CLASSES = {
|
||||||
|
abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy,
|
||||||
|
discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy,
|
||||||
|
caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy
|
||||||
|
}.freeze
|
||||||
|
private_constant :FALLBACK_POLICY_CLASSES
|
||||||
|
|
||||||
|
def post(*args, &task)
|
||||||
|
raise ArgumentError.new('no block given') unless block_given?
|
||||||
|
return fallback_action(*args, &task).call unless running?
|
||||||
|
@executor.submit Job.new(args, task)
|
||||||
|
true
|
||||||
|
rescue Java::JavaUtilConcurrent::RejectedExecutionException
|
||||||
|
raise RejectedExecutionError
|
||||||
|
end
|
||||||
|
|
||||||
|
def wait_for_termination(timeout = nil)
|
||||||
|
if timeout.nil?
|
||||||
|
ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok
|
||||||
|
true
|
||||||
|
else
|
||||||
|
@executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def shutdown
|
||||||
|
synchronize do
|
||||||
|
@executor.shutdown
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def kill
|
||||||
|
synchronize do
|
||||||
|
@executor.shutdownNow
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def ns_running?
|
||||||
|
!(ns_shuttingdown? || ns_shutdown?)
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_shuttingdown?
|
||||||
|
@executor.isShutdown && !@executor.isTerminated
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_shutdown?
|
||||||
|
@executor.isTerminated
|
||||||
|
end
|
||||||
|
|
||||||
|
class Job
|
||||||
|
include Runnable
|
||||||
|
def initialize(args, block)
|
||||||
|
@args = args
|
||||||
|
@block = block
|
||||||
|
end
|
||||||
|
|
||||||
|
def run
|
||||||
|
@block.call(*@args)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
private_constant :Job
|
||||||
|
end
|
||||||
|
|
||||||
|
class DaemonThreadFactory
|
||||||
|
# hide include from YARD
|
||||||
|
send :include, java.util.concurrent.ThreadFactory
|
||||||
|
|
||||||
|
def initialize(daemonize = true)
|
||||||
|
@daemonize = daemonize
|
||||||
|
@java_thread_factory = java.util.concurrent.Executors.defaultThreadFactory
|
||||||
|
end
|
||||||
|
|
||||||
|
def newThread(runnable)
|
||||||
|
thread = @java_thread_factory.newThread(runnable)
|
||||||
|
thread.setDaemon(@daemonize)
|
||||||
|
return thread
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
private_constant :DaemonThreadFactory
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,30 @@
|
|||||||
|
if Concurrent.on_jruby?
|
||||||
|
|
||||||
|
require 'concurrent/executor/java_executor_service'
|
||||||
|
require 'concurrent/executor/serial_executor_service'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro single_thread_executor
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
# @!visibility private
|
||||||
|
class JavaSingleThreadExecutor < JavaExecutorService
|
||||||
|
include SerialExecutorService
|
||||||
|
|
||||||
|
# @!macro single_thread_executor_method_initialize
|
||||||
|
def initialize(opts = {})
|
||||||
|
super(opts)
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def ns_initialize(opts)
|
||||||
|
@executor = java.util.concurrent.Executors.newSingleThreadExecutor(
|
||||||
|
DaemonThreadFactory.new(ns_auto_terminate?)
|
||||||
|
)
|
||||||
|
@fallback_policy = opts.fetch(:fallback_policy, :discard)
|
||||||
|
raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,145 @@
|
|||||||
|
if Concurrent.on_jruby?
|
||||||
|
|
||||||
|
require 'concurrent/executor/java_executor_service'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor
|
||||||
|
# @!macro thread_pool_options
|
||||||
|
# @!visibility private
|
||||||
|
class JavaThreadPoolExecutor < JavaExecutorService
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_max_pool_size
|
||||||
|
DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_min_pool_size
|
||||||
|
DEFAULT_MIN_POOL_SIZE = 0
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_max_queue_size
|
||||||
|
DEFAULT_MAX_QUEUE_SIZE = 0
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_thread_timeout
|
||||||
|
DEFAULT_THREAD_IDLETIMEOUT = 60
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_synchronous
|
||||||
|
DEFAULT_SYNCHRONOUS = false
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_length
|
||||||
|
attr_reader :max_length
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_queue
|
||||||
|
attr_reader :max_queue
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_synchronous
|
||||||
|
attr_reader :synchronous
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_method_initialize
|
||||||
|
def initialize(opts = {})
|
||||||
|
super(opts)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_can_overflow_question
|
||||||
|
def can_overflow?
|
||||||
|
@max_queue != 0
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_min_length
|
||||||
|
def min_length
|
||||||
|
@executor.getCorePoolSize
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_length
|
||||||
|
def max_length
|
||||||
|
@executor.getMaximumPoolSize
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_length
|
||||||
|
def length
|
||||||
|
@executor.getPoolSize
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_largest_length
|
||||||
|
def largest_length
|
||||||
|
@executor.getLargestPoolSize
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
|
||||||
|
def scheduled_task_count
|
||||||
|
@executor.getTaskCount
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_completed_task_count
|
||||||
|
def completed_task_count
|
||||||
|
@executor.getCompletedTaskCount
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_method_active_count
|
||||||
|
def active_count
|
||||||
|
@executor.getActiveCount
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_idletime
|
||||||
|
def idletime
|
||||||
|
@executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_queue_length
|
||||||
|
def queue_length
|
||||||
|
@executor.getQueue.size
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_remaining_capacity
|
||||||
|
def remaining_capacity
|
||||||
|
@max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_running_question
|
||||||
|
def running?
|
||||||
|
super && !@executor.isTerminating
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_method_prune_pool
|
||||||
|
def prune_pool
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def ns_initialize(opts)
|
||||||
|
min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
|
||||||
|
max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
|
||||||
|
idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
|
||||||
|
@max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
|
||||||
|
@synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS)
|
||||||
|
@fallback_policy = opts.fetch(:fallback_policy, :abort)
|
||||||
|
|
||||||
|
raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0
|
||||||
|
raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE
|
||||||
|
raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE
|
||||||
|
raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE
|
||||||
|
raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length
|
||||||
|
raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy)
|
||||||
|
|
||||||
|
if @max_queue == 0
|
||||||
|
if @synchronous
|
||||||
|
queue = java.util.concurrent.SynchronousQueue.new
|
||||||
|
else
|
||||||
|
queue = java.util.concurrent.LinkedBlockingQueue.new
|
||||||
|
end
|
||||||
|
else
|
||||||
|
queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue)
|
||||||
|
end
|
||||||
|
|
||||||
|
@executor = java.util.concurrent.ThreadPoolExecutor.new(
|
||||||
|
min_length,
|
||||||
|
max_length,
|
||||||
|
idletime,
|
||||||
|
java.util.concurrent.TimeUnit::SECONDS,
|
||||||
|
queue,
|
||||||
|
DaemonThreadFactory.new(ns_auto_terminate?),
|
||||||
|
FALLBACK_POLICY_CLASSES[@fallback_policy].new)
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,82 @@
|
|||||||
|
require 'concurrent/executor/abstract_executor_service'
|
||||||
|
require 'concurrent/atomic/event'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
# @!visibility private
|
||||||
|
class RubyExecutorService < AbstractExecutorService
|
||||||
|
safe_initialization!
|
||||||
|
|
||||||
|
def initialize(*args, &block)
|
||||||
|
super
|
||||||
|
@StopEvent = Event.new
|
||||||
|
@StoppedEvent = Event.new
|
||||||
|
end
|
||||||
|
|
||||||
|
def post(*args, &task)
|
||||||
|
raise ArgumentError.new('no block given') unless block_given?
|
||||||
|
deferred_action = synchronize {
|
||||||
|
if running?
|
||||||
|
ns_execute(*args, &task)
|
||||||
|
else
|
||||||
|
fallback_action(*args, &task)
|
||||||
|
end
|
||||||
|
}
|
||||||
|
if deferred_action
|
||||||
|
deferred_action.call
|
||||||
|
else
|
||||||
|
true
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def shutdown
|
||||||
|
synchronize do
|
||||||
|
break unless running?
|
||||||
|
stop_event.set
|
||||||
|
ns_shutdown_execution
|
||||||
|
end
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def kill
|
||||||
|
synchronize do
|
||||||
|
break if shutdown?
|
||||||
|
stop_event.set
|
||||||
|
ns_kill_execution
|
||||||
|
stopped_event.set
|
||||||
|
end
|
||||||
|
true
|
||||||
|
end
|
||||||
|
|
||||||
|
def wait_for_termination(timeout = nil)
|
||||||
|
stopped_event.wait(timeout)
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
def stop_event
|
||||||
|
@StopEvent
|
||||||
|
end
|
||||||
|
|
||||||
|
def stopped_event
|
||||||
|
@StoppedEvent
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_shutdown_execution
|
||||||
|
stopped_event.set
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_running?
|
||||||
|
!stop_event.set?
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_shuttingdown?
|
||||||
|
!(ns_running? || ns_shutdown?)
|
||||||
|
end
|
||||||
|
|
||||||
|
def ns_shutdown?
|
||||||
|
stopped_event.set?
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,21 @@
|
|||||||
|
require 'concurrent/executor/ruby_thread_pool_executor'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro single_thread_executor
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
# @!visibility private
|
||||||
|
class RubySingleThreadExecutor < RubyThreadPoolExecutor
|
||||||
|
|
||||||
|
# @!macro single_thread_executor_method_initialize
|
||||||
|
def initialize(opts = {})
|
||||||
|
super(
|
||||||
|
min_threads: 1,
|
||||||
|
max_threads: 1,
|
||||||
|
max_queue: 0,
|
||||||
|
idletime: DEFAULT_THREAD_IDLETIMEOUT,
|
||||||
|
fallback_policy: opts.fetch(:fallback_policy, :discard),
|
||||||
|
)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,373 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/atomic/event'
|
||||||
|
require 'concurrent/concern/logging'
|
||||||
|
require 'concurrent/executor/ruby_executor_service'
|
||||||
|
require 'concurrent/utility/monotonic_time'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor
|
||||||
|
# @!macro thread_pool_options
|
||||||
|
# @!visibility private
|
||||||
|
class RubyThreadPoolExecutor < RubyExecutorService
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_max_pool_size
|
||||||
|
DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_min_pool_size
|
||||||
|
DEFAULT_MIN_POOL_SIZE = 0
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_max_queue_size
|
||||||
|
DEFAULT_MAX_QUEUE_SIZE = 0
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_thread_timeout
|
||||||
|
DEFAULT_THREAD_IDLETIMEOUT = 60
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_constant_default_synchronous
|
||||||
|
DEFAULT_SYNCHRONOUS = false
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_length
|
||||||
|
attr_reader :max_length
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_min_length
|
||||||
|
attr_reader :min_length
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_idletime
|
||||||
|
attr_reader :idletime
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_max_queue
|
||||||
|
attr_reader :max_queue
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_synchronous
|
||||||
|
attr_reader :synchronous
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_method_initialize
|
||||||
|
def initialize(opts = {})
|
||||||
|
super(opts)
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_largest_length
|
||||||
|
def largest_length
|
||||||
|
synchronize { @largest_length }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
|
||||||
|
def scheduled_task_count
|
||||||
|
synchronize { @scheduled_task_count }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_completed_task_count
|
||||||
|
def completed_task_count
|
||||||
|
synchronize { @completed_task_count }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_method_active_count
|
||||||
|
def active_count
|
||||||
|
synchronize do
|
||||||
|
@pool.length - @ready.length
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro executor_service_method_can_overflow_question
|
||||||
|
def can_overflow?
|
||||||
|
synchronize { ns_limited_queue? }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_length
|
||||||
|
def length
|
||||||
|
synchronize { @pool.length }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_queue_length
|
||||||
|
def queue_length
|
||||||
|
synchronize { @queue.length }
|
||||||
|
end
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor_attr_reader_remaining_capacity
|
||||||
|
def remaining_capacity
  # Number of additional tasks the queue can accept, or -1 when the queue
  # is unbounded (max_queue == 0).
  synchronize do
    if ns_limited_queue?
      @max_queue - @queue.length
    else
      -1
    end
  end
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def remove_busy_worker(worker)
  # Callback from a Worker thread that is shutting down; drops it from @pool.
  synchronize { ns_remove_busy_worker worker }
end

# @!visibility private
def ready_worker(worker, last_message)
  # Callback from a Worker that finished a task; `last_message` is the
  # monotonic time it went idle.
  synchronize { ns_ready_worker worker, last_message }
end

# @!visibility private
def worker_died(worker)
  # Callback from a Worker whose task raised a non-StandardError; replaces it.
  synchronize { ns_worker_died worker }
end

# @!visibility private
def worker_task_completed
  synchronize { @completed_task_count += 1 }
end

# @!macro thread_pool_executor_method_prune_pool
def prune_pool
  synchronize { ns_prune_pool }
end

private
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def ns_initialize(opts)
  # Reads pool configuration from `opts`, validates it, and initialises all
  # bookkeeping state. Runs under the lock taken by the superclass.
  @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
  @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
  @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
  @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
  @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS)
  @fallback_policy = opts.fetch(:fallback_policy, :abort)

  # :synchronous means direct hand-off, which is incompatible with buffering.
  raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0
  raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)
  raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE
  raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE
  raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
  raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length

  @pool = [] # all workers
  @ready = [] # used as a stash (most idle worker is at the start)
  @queue = [] # used as queue
  # @ready or @queue is empty at all times
  @scheduled_task_count = 0
  @completed_task_count = 0
  @largest_length = 0
  @workers_counter = 0
  @ruby_pid = $$ # detects if Ruby has forked

  @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
  @next_gc_time = Concurrent.monotonic_time + @gc_interval
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
# True when `max_queue` imposes a bound on the task queue; a value of zero
# means the queue may grow without limit.
# @!visibility private
def ns_limited_queue?
  !@max_queue.zero?
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def ns_execute(*args, &task)
  # Submission path: try to hand the task directly to a worker, else queue it,
  # else invoke the fallback policy. Runs under the executor lock.
  ns_reset_if_forked

  if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
    @scheduled_task_count += 1
  else
    # Neither a worker nor queue capacity was available.
    return fallback_action(*args, &task)
  end

  # Opportunistically prune idle workers on the submission path.
  ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
  nil
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def ns_shutdown_execution
  # Graceful shutdown: queued tasks still drain; workers stop once idle.
  ns_reset_if_forked

  if @pool.empty?
    # nothing to do
    stopped_event.set
  end

  if @queue.empty?
    # no more tasks will be accepted, just stop all workers
    @pool.each(&:stop)
  end
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def ns_kill_execution
  # Hard shutdown: worker threads are killed outright; queued tasks are lost.
  # TODO log out unprocessed tasks in queue
  # TODO try to shutdown first?
  @pool.each(&:kill)
  @pool.clear
  @ready.clear
end
|
||||||
|
|
||||||
|
# tries to assign task to a worker, tries to get one from @ready or to create new one
|
||||||
|
# @return [true, false] if task is assigned to a worker
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
# Tries to assign the task to a worker, either one popped from the idle
# stash or a freshly created one.
# @return [true, false] whether the task was handed to a worker
#
# @!visibility private
def ns_assign_worker(*args, &task)
  # Prefer creating fresh workers until the pool reaches its minimum size;
  # after that reuse the most recently idled worker from the stash.
  candidate = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
  worker, _last_message = candidate

  return false unless worker

  worker << [task, args]
  true
rescue ThreadError
  # Raised when the operating system refuses to create the new thread.
  false
end
|
||||||
|
|
||||||
|
# tries to enqueue task
|
||||||
|
# @return [true, false] if enqueued
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
# Tries to buffer the task in the queue.
# @return [true, false] whether the task was enqueued
#
# @!visibility private
def ns_enqueue(*args, &task)
  # Synchronous mode never buffers; it requires direct hand-off.
  return false if @synchronous
  # A bounded queue that is already full cannot accept more work.
  return false if ns_limited_queue? && @queue.size >= @max_queue

  @queue << [task, args]
  true
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def ns_worker_died(worker)
  # Replace a crashed worker so the pool keeps its capacity; the replacement
  # starts idle (success = false marks it as not having completed a task).
  ns_remove_busy_worker worker
  replacement_worker = ns_add_busy_worker
  ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker
end
|
||||||
|
|
||||||
|
# creates new worker which has to receive work to do after it's added
|
||||||
|
# @return [nil, Worker] nil if max capacity is reached
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def ns_add_busy_worker
  # Creates a new worker (which must then be given work); returns nil when
  # the pool is already at max capacity.
  return if @pool.size >= @max_length

  @workers_counter += 1
  @pool << (worker = Worker.new(self, @workers_counter))
  # Track the high-water mark reported by #largest_length.
  @largest_length = @pool.length if @pool.length > @largest_length
  worker
end
|
||||||
|
|
||||||
|
# handle ready worker, giving it new job or assigning back to @ready
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def ns_ready_worker(worker, last_message, success = true)
  # Hands the idle worker its next job from the queue, or stashes it in
  # @ready (tagged with its idle-since timestamp) when no work is pending.
  task_and_args = @queue.shift
  if task_and_args
    worker << task_and_args
  else
    # stop workers when !running?, do not return them to @ready
    if running?
      # Sanity check: a stashed worker must carry its idle-since timestamp.
      raise unless last_message
      @ready.push([worker, last_message])
    else
      worker.stop
    end
  end
end
|
||||||
|
|
||||||
|
# removes a worker which is not tracked in @ready
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def ns_remove_busy_worker(worker)
  # Drops a busy (non-stashed) worker from the pool; signals full shutdown
  # once the last worker is gone after shutdown began.
  @pool.delete(worker)
  stopped_event.set if @pool.empty? && !running?
  true
end
|
||||||
|
|
||||||
|
# try oldest worker if it is idle for enough time, it's returned back at the start
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
def ns_prune_pool
  # Stops workers that have been idle longer than `idletime`, oldest first,
  # without shrinking the pool below `min_length`. Stops scanning at the
  # first worker that is still within its idle allowance (@ready is ordered
  # most-idle first).
  now = Concurrent.monotonic_time
  stopped_workers = 0
  while !@ready.empty? && (@pool.size - stopped_workers > @min_length)
    worker, last_message = @ready.first
    if now - last_message > self.idletime
      stopped_workers += 1
      @ready.shift
      worker << :stop
    else break
    end
  end

  # Schedule the next opportunistic prune.
  @next_gc_time = Concurrent.monotonic_time + @gc_interval
end
|
||||||
|
|
||||||
|
# After a fork the child inherits the parent's bookkeeping but none of its
# threads, so discard all workers and queued tasks and restart the counters.
# No-op while the process id matches the one recorded at initialisation.
# @!visibility private
def ns_reset_if_forked
  return if $$ == @ruby_pid

  @queue.clear
  @ready.clear
  @pool.clear
  @scheduled_task_count = 0
  @completed_task_count = 0
  @largest_length = 0
  @workers_counter = 0
  @ruby_pid = $$
end
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
class Worker
  # A single pool thread. It blocks on its own message queue and receives
  # either `[task, args]` pairs to run or `:stop` to terminate.
  include Concern::Logging

  def initialize(pool, id)
    # instance variables accessed only under pool's lock so no need to sync here again
    @queue = Queue.new
    @pool = pool
    @thread = create_worker @queue, pool, pool.idletime

    # Name the thread for debuggability where the Ruby supports it.
    if @thread.respond_to?(:name=)
      @thread.name = [pool.name, 'worker', id].compact.join('-')
    end
  end

  # Delivers a message (a `[task, args]` pair or `:stop`) to this worker.
  def <<(message)
    @queue << message
  end

  # Asks the worker to finish its current task and then terminate.
  def stop
    @queue << :stop
  end

  # Kills the worker thread immediately, abandoning any in-flight task.
  def kill
    @thread.kill
  end

  private

  # Spawns the thread that drains this worker's queue until told to stop.
  def create_worker(queue, pool, idletime)
    Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
      catch(:stop) do
        loop do

          case message = my_queue.pop
          when :stop
            my_pool.remove_busy_worker(self)
            throw :stop

          else
            task, args = message
            run_task my_pool, task, args
            # Report back as idle with the current monotonic timestamp.
            my_pool.ready_worker(self, Concurrent.monotonic_time)
          end
        end
      end
    end
  end

  # Runs one task; StandardError is logged and swallowed, while other
  # Exceptions kill this worker (the pool replaces it via worker_died).
  def run_task(pool, task, args)
    task.call(*args)
    pool.worker_task_completed
  rescue => ex
    # let it fail
    log DEBUG, ex
  rescue Exception => ex
    log ERROR, ex
    pool.worker_died(self)
    throw :stop
  end
end
|
||||||
|
|
||||||
|
private_constant :Worker
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,35 @@
|
|||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A simple utility class that executes a callable and returns and array of three elements:
|
||||||
|
# success - indicating if the callable has been executed without errors
|
||||||
|
# value - filled by the callable result if it has been executed without errors, nil otherwise
|
||||||
|
# reason - the error risen by the callable if it has been executed with errors, nil otherwise
|
||||||
|
# Runs the wrapped callable under a lock and reports the outcome as a
# `[success, value, reason]` triple instead of raising.
class SafeTaskExecutor < Synchronization::LockableObject

  # @param [#call] task the callable to guard
  # @param [Hash] opts `:rescue_exception` — when truthy, rescue `Exception`
  #   instead of `StandardError` (dangerous; use sparingly)
  def initialize(task, opts = {})
    @task = task
    rescue_everything = opts.fetch(:rescue_exception, false)
    @exception_class = rescue_everything ? Exception : StandardError
    super() # ensures visibility of the ivars across threads
  end

  # Invokes the task with the given arguments under the lock.
  # Exceptions outside @exception_class still propagate.
  # @return [Array(Boolean, Object, Exception)] success flag, result value
  #   (nil on failure), and the rescued error (nil on success)
  def execute(*args)
    success = false
    value = nil
    reason = nil

    synchronize do
      begin
        value = @task.call(*args)
        success = true
      rescue @exception_class => error
        reason = error
      end
    end

    [success, value, reason]
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,34 @@
|
|||||||
|
require 'concurrent/executor/executor_service'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Indicates that the including `ExecutorService` guarantees
|
||||||
|
# that all operations will occur in the order they are post and that no
|
||||||
|
# two operations may occur simultaneously. This module provides no
|
||||||
|
# functionality and provides no guarantees. That is the responsibility
|
||||||
|
# of the including class. This module exists solely to allow the including
|
||||||
|
# object to be interrogated for its serialization status.
|
||||||
|
#
|
||||||
|
# @example
|
||||||
|
# class Foo
|
||||||
|
# include Concurrent::SerialExecutor
|
||||||
|
# end
|
||||||
|
#
|
||||||
|
# foo = Foo.new
|
||||||
|
#
|
||||||
|
# foo.is_a? Concurrent::ExecutorService #=> true
|
||||||
|
# foo.is_a? Concurrent::SerialExecutor #=> true
|
||||||
|
# foo.serialized? #=> true
|
||||||
|
#
|
||||||
|
# @!visibility private
|
||||||
|
module SerialExecutorService
  include ExecutorService

  # @!macro executor_service_method_serialized_question
  #
  # @note Always returns `true`
  # Marker predicate: including this module asserts the executor runs tasks
  # one at a time, in submission order. The module itself enforces nothing.
  def serialized?
    true
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,107 @@
|
|||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/concern/logging'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Ensures passed jobs in a serialized order never running at the same time.
|
||||||
|
class SerializedExecution < Synchronization::LockableObject
  include Concern::Logging

  def initialize()
    super()
    synchronize { ns_initialize }
  end

  # A unit of work: the target executor plus the task and its arguments.
  Job = Struct.new(:executor, :args, :block) do
    def call
      block.call(*args)
    end
  end

  # Submit a task to the executor for asynchronous processing.
  #
  # @param [Executor] executor to be used for this job
  #
  # @param [Array] args zero or more arguments to be passed to the task
  #
  # @yield the asynchronous task to perform
  #
  # @return [Boolean] `true` if the task is queued, `false` if the executor
  #   is not running
  #
  # @raise [ArgumentError] if no task is given
  def post(executor, *args, &task)
    posts [[executor, args, task]]
    true
  end

  # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not
  # be interleaved by other tasks.
  #
  # @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
  #   first is a {ExecutorService}, second is array of args for task, third is a task (Proc)
  def posts(posts)
    # if can_overflow?
    #   raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
    # end

    return nil if posts.empty?

    jobs = posts.map { |executor, args, task| Job.new executor, args, task }

    # Under the lock: if a job is already running, stash everything; otherwise
    # claim the "being executed" flag, stash all but the first job, and run it.
    job_to_post = synchronize do
      if @being_executed
        @stash.push(*jobs)
        nil
      else
        @being_executed = true
        @stash.push(*jobs[1..-1])
        jobs.first
      end
    end

    call_job job_to_post if job_to_post
    true
  end

  private

  def ns_initialize
    # @being_executed guards the serial invariant; @stash holds pending jobs.
    @being_executed = false
    @stash = []
  end

  # Posts the job to its executor; if the executor rejects it, runs it inline.
  def call_job(job)
    did_it_run = begin
      job.executor.post { work(job) }
      true
    rescue RejectedExecutionError => ex
      false
    end

    # TODO not the best idea to run it myself
    unless did_it_run
      begin
        work job
      rescue => ex
        # let it fail
        log DEBUG, ex
      end
    end
  end

  # ensures next job is executed if any is stashed
  def work(job)
    job.call
  ensure
    # Either dequeue the next stashed job or release the serial flag —
    # done atomically so no second job can start in between.
    synchronize do
      job = @stash.shift || (@being_executed = false)
    end

    # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end
    # of this block
    call_job job if job
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,28 @@
|
|||||||
|
require 'delegate'
|
||||||
|
require 'concurrent/executor/serial_executor_service'
|
||||||
|
require 'concurrent/executor/serialized_execution'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A wrapper/delegator for any `ExecutorService` that
|
||||||
|
# guarantees serialized execution of tasks.
|
||||||
|
#
|
||||||
|
# @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html)
|
||||||
|
# @see Concurrent::SerializedExecution
|
||||||
|
class SerializedExecutionDelegator < SimpleDelegator
  include SerialExecutorService

  # @param [ExecutorService] executor the underlying executor to wrap;
  #   all non-post methods are delegated to it unchanged.
  def initialize(executor)
    @executor = executor
    @serializer = SerializedExecution.new
    super(executor)
  end

  # @!macro executor_service_method_post
  def post(*args, &task)
    raise ArgumentError.new('no block given') unless block_given?
    return false unless running?
    # Route through the serializer so tasks run one at a time, in order.
    @serializer.post(@executor, *args, &task)
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,103 @@
|
|||||||
|
require 'concurrent/atomic/atomic_boolean'
|
||||||
|
require 'concurrent/atomic/atomic_fixnum'
|
||||||
|
require 'concurrent/atomic/event'
|
||||||
|
require 'concurrent/executor/executor_service'
|
||||||
|
require 'concurrent/executor/ruby_executor_service'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# An executor service in which every operation spawns a new,
|
||||||
|
# independently operating thread.
|
||||||
|
#
|
||||||
|
# This is perhaps the most inefficient executor service in this
|
||||||
|
# library. It exists mainly for testing and debugging. Thread creation
|
||||||
|
# and management is expensive in Ruby and this executor performs no
|
||||||
|
# resource pooling. This can be very beneficial during testing and
|
||||||
|
# debugging because it decouples the using code from the underlying
|
||||||
|
# executor implementation. In production this executor will likely
|
||||||
|
# lead to suboptimal performance.
|
||||||
|
#
|
||||||
|
# @note Intended for use primarily in testing and debugging.
|
||||||
|
class SimpleExecutorService < RubyExecutorService

  # @!macro executor_service_method_post
  # Fire-and-forget class-level variant: spawns an untracked thread per task.
  def self.post(*args)
    raise ArgumentError.new('no block given') unless block_given?
    Thread.new(*args) do
      Thread.current.abort_on_exception = false
      yield(*args)
    end
    true
  end

  # @!macro executor_service_method_left_shift
  def self.<<(task)
    post(&task)
    self
  end

  # @!macro executor_service_method_post
  # Instance variant: each task gets its own thread; @count tracks in-flight
  # tasks so shutdown can wait for them.
  def post(*args, &task)
    raise ArgumentError.new('no block given') unless block_given?
    return false unless running?
    @count.increment
    Thread.new(*args) do
      Thread.current.abort_on_exception = false
      begin
        yield(*args)
      ensure
        @count.decrement
        # Signal full termination once the last in-flight task finishes
        # after shutdown was requested.
        @stopped.set if @running.false? && @count.value == 0
      end
    end
  end

  # @!macro executor_service_method_left_shift
  def <<(task)
    post(&task)
    self
  end

  # @!macro executor_service_method_running_question
  def running?
    @running.true?
  end

  # @!macro executor_service_method_shuttingdown_question
  def shuttingdown?
    @running.false? && ! @stopped.set?
  end

  # @!macro executor_service_method_shutdown_question
  def shutdown?
    @stopped.set?
  end

  # @!macro executor_service_method_shutdown
  def shutdown
    @running.make_false
    # Nothing in flight: terminate immediately.
    @stopped.set if @count.value == 0
    true
  end

  # @!macro executor_service_method_kill
  def kill
    # Note: already-spawned task threads are not interrupted.
    @running.make_false
    @stopped.set
    true
  end

  # @!macro executor_service_method_wait_for_termination
  def wait_for_termination(timeout = nil)
    @stopped.wait(timeout)
  end

  private

  def ns_initialize(*args)
    @running = Concurrent::AtomicBoolean.new(true)
    @stopped = Concurrent::Event.new
    @count = Concurrent::AtomicFixnum.new(0)
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,57 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/executor/ruby_single_thread_executor'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# JRuby has a Java-backed implementation; load it only on that engine.
if Concurrent.on_jruby?
  require 'concurrent/executor/java_single_thread_executor'
end

# Select the best available implementation for the current Ruby engine.
SingleThreadExecutorImplementation = case
                                     when Concurrent.on_jruby?
                                       JavaSingleThreadExecutor
                                     else
                                       RubySingleThreadExecutor
                                     end
private_constant :SingleThreadExecutorImplementation
|
||||||
|
|
||||||
|
# @!macro single_thread_executor
|
||||||
|
#
|
||||||
|
# A thread pool with a single thread and an unlimited queue. Should the thread
|
||||||
|
# die for any reason it will be removed and replaced, thus ensuring that
|
||||||
|
# the executor will always remain viable and available to process jobs.
|
||||||
|
#
|
||||||
|
# A common pattern for background processing is to create a single thread
|
||||||
|
# on which an infinite loop is run. The thread's loop blocks on an input
|
||||||
|
# source (perhaps blocking I/O or a queue) and processes each input as it
|
||||||
|
# is received. This pattern has several issues. The thread itself is highly
|
||||||
|
# susceptible to errors during processing. Also, the thread itself must be
|
||||||
|
# constantly monitored and restarted should it die. `SingleThreadExecutor`
|
||||||
|
# encapsulates all these behaviors. The task processor is highly resilient
|
||||||
|
# to errors from within tasks. Also, should the thread die it will
|
||||||
|
# automatically be restarted.
|
||||||
|
#
|
||||||
|
# The API and behavior of this class are based on Java's `SingleThreadExecutor`.
|
||||||
|
#
|
||||||
|
# @!macro abstract_executor_service_public_api
|
||||||
|
# Thin public alias over the engine-specific implementation; the body only
# carries documentation.
class SingleThreadExecutor < SingleThreadExecutorImplementation

  # @!macro single_thread_executor_method_initialize
  #
  #   Create a new thread pool.
  #
  #   @option opts [Symbol] :fallback_policy (:discard) the policy for handling new
  #     tasks that are received when the queue size has reached
  #     `max_queue` or the executor has shut down
  #
  #   @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
  #     in `FALLBACK_POLICIES`
  #
  #   @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
  #   @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
  #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html

  # @!method initialize(opts = {})
  #   @!macro single_thread_executor_method_initialize
end
|
||||||
|
end
|
||||||
@ -0,0 +1,88 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/executor/ruby_thread_pool_executor'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# JRuby has a Java-backed implementation; load it only on that engine.
if Concurrent.on_jruby?
  require 'concurrent/executor/java_thread_pool_executor'
end

# Select the best available implementation for the current Ruby engine.
ThreadPoolExecutorImplementation = case
                                   when Concurrent.on_jruby?
                                     JavaThreadPoolExecutor
                                   else
                                     RubyThreadPoolExecutor
                                   end
private_constant :ThreadPoolExecutorImplementation
|
||||||
|
|
||||||
|
# @!macro thread_pool_executor
|
||||||
|
#
|
||||||
|
# An abstraction composed of one or more threads and a task queue. Tasks
|
||||||
|
# (blocks or `proc` objects) are submitted to the pool and added to the queue.
|
||||||
|
# The threads in the pool remove the tasks and execute them in the order
|
||||||
|
# they were received.
|
||||||
|
#
|
||||||
|
# A `ThreadPoolExecutor` will automatically adjust the pool size according
|
||||||
|
# to the bounds set by `min-threads` and `max-threads`. When a new task is
|
||||||
|
# submitted and fewer than `min-threads` threads are running, a new thread
|
||||||
|
# is created to handle the request, even if other worker threads are idle.
|
||||||
|
# If there are more than `min-threads` but less than `max-threads` threads
|
||||||
|
# running, a new thread will be created only if the queue is full.
|
||||||
|
#
|
||||||
|
# Threads that are idle for too long will be garbage collected, down to the
|
||||||
|
# configured minimum options. Should a thread crash, it too will be garbage collected.
|
||||||
|
#
|
||||||
|
# `ThreadPoolExecutor` is based on the Java class of the same name. From
|
||||||
|
# the official Java documentation;
|
||||||
|
#
|
||||||
|
# > Thread pools address two different problems: they usually provide
|
||||||
|
# > improved performance when executing large numbers of asynchronous tasks,
|
||||||
|
# > due to reduced per-task invocation overhead, and they provide a means
|
||||||
|
# > of bounding and managing the resources, including threads, consumed
|
||||||
|
# > when executing a collection of tasks. Each ThreadPoolExecutor also
|
||||||
|
# > maintains some basic statistics, such as the number of completed tasks.
|
||||||
|
# >
|
||||||
|
# > To be useful across a wide range of contexts, this class provides many
|
||||||
|
# > adjustable parameters and extensibility hooks. However, programmers are
|
||||||
|
# > urged to use the more convenient Executors factory methods
|
||||||
|
# > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation),
|
||||||
|
# > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single
|
||||||
|
# > background thread), that preconfigure settings for the most common usage
|
||||||
|
# > scenarios.
|
||||||
|
#
|
||||||
|
# @!macro thread_pool_options
|
||||||
|
#
|
||||||
|
# @!macro thread_pool_executor_public_api
|
||||||
|
# Thin public alias over the engine-specific implementation; the body only
# carries documentation.
class ThreadPoolExecutor < ThreadPoolExecutorImplementation

  # @!macro thread_pool_executor_method_initialize
  #
  #   Create a new thread pool.
  #
  #   @param [Hash] opts the options which configure the thread pool.
  #
  #   @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum
  #     number of threads to be created
  #   @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted
  #     and fewer than `min_threads` are running, a new thread is created
  #   @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum
  #     number of seconds a thread may be idle before being reclaimed
  #   @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum
  #     number of tasks allowed in the work queue at any one time; a value of
  #     zero means the queue may grow without bound
  #   @option opts [Symbol] :fallback_policy (:abort) the policy for handling new
  #     tasks that are received when the queue size has reached
  #     `max_queue` or the executor has shut down
  #   @option opts [Boolean] :synchronous (DEFAULT_SYNCHRONOUS) whether or not a value of 0
  #     for :max_queue means the queue must perform direct hand-off rather than unbounded.
  #   @raise [ArgumentError] if `:max_threads` is less than one
  #   @raise [ArgumentError] if `:min_threads` is less than zero
  #   @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
  #     in `FALLBACK_POLICIES`
  #
  #   @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html

  # @!method initialize(opts = {})
  #   @!macro thread_pool_executor_method_initialize
end
|
||||||
|
end
|
||||||
@ -0,0 +1,176 @@
|
|||||||
|
require 'concurrent/scheduled_task'
|
||||||
|
require 'concurrent/atomic/event'
|
||||||
|
require 'concurrent/collection/non_concurrent_priority_queue'
|
||||||
|
require 'concurrent/executor/executor_service'
|
||||||
|
require 'concurrent/executor/single_thread_executor'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/options'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# Executes a collection of tasks, each after a given delay. A master task
|
||||||
|
# monitors the set and schedules each task for execution at the appropriate
|
||||||
|
# time. Tasks are run on the global thread pool or on the supplied executor.
|
||||||
|
# Each task is represented as a `ScheduledTask`.
|
||||||
|
#
|
||||||
|
# @see Concurrent::ScheduledTask
|
||||||
|
#
|
||||||
|
# @!macro monotonic_clock_warning
|
||||||
|
class TimerSet < RubyExecutorService
|
||||||
|
|
||||||
|
# Create a new set of timed tasks.
|
||||||
|
#
|
||||||
|
# @!macro executor_options
|
||||||
|
#
|
||||||
|
# @param [Hash] opts the options used to specify the executor on which to perform actions
|
||||||
|
# @option opts [Executor] :executor when set use the given `Executor` instance.
|
||||||
|
# Three special values are also supported: `:task` returns the global task pool,
|
||||||
|
# `:operation` returns the global operation pool, and `:immediate` returns a new
|
||||||
|
# `ImmediateExecutor` object.
|
||||||
|
def initialize(opts = {})
  # State is set up in `ns_initialize`, invoked by super while holding the lock.
  super(opts)
end
|
||||||
|
|
||||||
|
# Post a task to be executed after a given delay (in seconds). If the
|
||||||
|
# delay is less than 1/100th of a second the task will be immediately post
|
||||||
|
# to the executor.
|
||||||
|
#
|
||||||
|
# @param [Float] delay the number of seconds to wait for before executing the task.
|
||||||
|
# @param [Array<Object>] args the arguments passed to the task on execution.
|
||||||
|
#
|
||||||
|
# @yield the task to be performed.
|
||||||
|
#
|
||||||
|
# @return [Concurrent::ScheduledTask, false] IVar representing the task if the post
|
||||||
|
# is successful; false after shutdown.
|
||||||
|
#
|
||||||
|
# @raise [ArgumentError] if the intended execution time is not in the future.
|
||||||
|
# @raise [ArgumentError] if no block is given.
|
||||||
|
def post(delay, *args, &task)
  raise ArgumentError.new('no block given') unless block_given?
  return false unless running?
  opts = { executor:  @task_executor,
           args:      args,
           timer_set: self }
  # Note: `task` is rebound from the block to the resulting ScheduledTask,
  # which registers itself with this set via #post_task.
  task = ScheduledTask.execute(delay, opts, &task) # may raise exception
  task.unscheduled? ? false : task
end
|
||||||
|
|
||||||
|
# Begin an immediate shutdown. In-progress tasks will be allowed to
|
||||||
|
# complete but enqueued tasks will be dismissed and no new tasks
|
||||||
|
# will be accepted. Has no additional effect if the thread pool is
|
||||||
|
# not running.
|
||||||
|
def kill
  # A TimerSet has no interruptible in-progress work of its own, so kill is
  # just a graceful shutdown.
  shutdown
end

# Raw `<<` (post with no delay argument) makes no sense for a timer set.
private :<<

private
|
||||||
|
|
||||||
|
# Initialize the object.
|
||||||
|
#
|
||||||
|
# @param [Hash] opts the options to create the object with.
|
||||||
|
# @!visibility private
|
||||||
|
def ns_initialize(opts)
  # Min-ordered priority queue: the next task due is always at the head.
  @queue = Collection::NonConcurrentPriorityQueue.new(order: :min)
  # Executor the tasks themselves run on (defaults to the global IO pool).
  @task_executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
  # Dedicated thread that sleeps until the next task is due.
  @timer_executor = SingleThreadExecutor.new
  @condition = Event.new
  @ruby_pid = $$ # detects if Ruby has forked
end
|
||||||
|
|
||||||
|
# Post the task to the internal queue.
#
# @note This is intended as a callback method from ScheduledTask
#   only. It is not intended to be used directly. Post a task
#   by using the `SchedulesTask#execute` method.
#
# @!visibility private
def post_task(task)
  # Take the object lock, then do the real work in #ns_post_task.
  synchronize { ns_post_task(task) }
end
|
||||||
|
|
||||||
|
# Enqueue a ScheduledTask, or run it immediately when it is already due.
# Must be called while holding the lock (see #post_task).
#
# @return [Boolean] true if accepted, false if the set is not running.
# @!visibility private
def ns_post_task(task)
  return false unless ns_running?
  ns_reset_if_forked
  # Tasks due within ~10ms are dispatched straight to their executor
  # rather than round-tripping through the timer queue.
  if (task.initial_delay) <= 0.01
    task.executor.post { task.process_task }
  else
    @queue.push(task)
    # only post the process method when the queue is empty
    @timer_executor.post(&method(:process_tasks)) if @queue.length == 1
    # Wake the processing loop so it re-evaluates the head of the queue.
    @condition.set
  end
  true
end
|
||||||
|
|
||||||
|
# Remove the given task from the queue.
#
# @note This is intended as a callback method from `ScheduledTask`
#   only. It is not intended to be used directly. Cancel a task
#   by using the `ScheduledTask#cancel` method.
#
# @!visibility private
def remove_task(task)
  synchronize { @queue.delete(task) }
end
|
||||||
|
|
||||||
|
# `ExecutorService` callback called during shutdown.
# Drops all pending timers, kills the timer thread, and signals
# that shutdown is complete.
#
# @!visibility private
def ns_shutdown_execution
  ns_reset_if_forked
  @queue.clear
  @timer_executor.kill
  stopped_event.set
end
|
||||||
|
|
||||||
|
# Discard inherited state after a fork. A child process sees a
# different `$$` than the one recorded at initialization; parent-owned
# timers and condition state are not valid in the child.
#
# @!visibility private
def ns_reset_if_forked
  if $$ != @ruby_pid
    @queue.clear
    @condition.reset
    @ruby_pid = $$
  end
end
|
||||||
|
|
||||||
|
# Run a loop and execute tasks in the scheduled order and at the approximate
# scheduled time. If no tasks remain the thread will exit gracefully so that
# garbage collection can occur. If there are no ready tasks it will sleep
# for up to 60 seconds waiting for the next scheduled task.
#
# NOTE: runs on @timer_executor's single thread, which is the sole
# consumer of @queue (see the race-condition comment below).
#
# @!visibility private
def process_tasks
  loop do
    # Reset the wake-up flag before peeking so a concurrent #ns_post_task
    # that fires between peek and wait will still wake us.
    task = synchronize { @condition.reset; @queue.peek }
    break unless task

    now = Concurrent.monotonic_time
    diff = task.schedule_time - now

    if diff <= 0
      # We need to remove the task from the queue before passing
      # it to the executor, to avoid race conditions where we pass
      # the peek'ed task to the executor and then pop a different
      # one that's been added in the meantime.
      #
      # Note that there's no race condition between the peek and
      # this pop - this pop could retrieve a different task from
      # the peek, but that task would be due to fire now anyway
      # (because @queue is a priority queue, and this thread is
      # the only reader, so whatever timer is at the head of the
      # queue now must have the same pop time, or a closer one, as
      # when we peeked).
      task = synchronize { @queue.pop }
      begin
        task.executor.post { task.process_task }
      rescue RejectedExecutionError
        # ignore and continue
      end
    else
      # Sleep until the head task is due, a new task arrives
      # (@condition.set), or 60s elapses — whichever comes first.
      @condition.wait([diff, 60].min)
    end
  end
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,20 @@
|
|||||||
|
require 'concurrent/executor/abstract_executor_service'
|
||||||
|
require 'concurrent/executor/cached_thread_pool'
|
||||||
|
require 'concurrent/executor/executor_service'
|
||||||
|
require 'concurrent/executor/fixed_thread_pool'
|
||||||
|
require 'concurrent/executor/immediate_executor'
|
||||||
|
require 'concurrent/executor/indirect_immediate_executor'
|
||||||
|
require 'concurrent/executor/java_executor_service'
|
||||||
|
require 'concurrent/executor/java_single_thread_executor'
|
||||||
|
require 'concurrent/executor/java_thread_pool_executor'
|
||||||
|
require 'concurrent/executor/ruby_executor_service'
|
||||||
|
require 'concurrent/executor/ruby_single_thread_executor'
|
||||||
|
require 'concurrent/executor/ruby_thread_pool_executor'
|
||||||
|
require 'concurrent/executor/cached_thread_pool'
|
||||||
|
require 'concurrent/executor/safe_task_executor'
|
||||||
|
require 'concurrent/executor/serial_executor_service'
|
||||||
|
require 'concurrent/executor/serialized_execution'
|
||||||
|
require 'concurrent/executor/serialized_execution_delegator'
|
||||||
|
require 'concurrent/executor/single_thread_executor'
|
||||||
|
require 'concurrent/executor/thread_pool_executor'
|
||||||
|
require 'concurrent/executor/timer_set'
|
||||||
@ -0,0 +1,141 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/constants'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/ivar'
|
||||||
|
require 'concurrent/executor/safe_task_executor'
|
||||||
|
|
||||||
|
require 'concurrent/options'
|
||||||
|
|
||||||
|
# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc.
|
||||||
|
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# {include:file:docs-source/future.md}
|
||||||
|
#
|
||||||
|
# @!macro copy_options
|
||||||
|
#
|
||||||
|
# @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module
|
||||||
|
# @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function
|
||||||
|
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future
|
||||||
|
class Future < IVar

  # Create a new `Future` in the `:unscheduled` state.
  #
  # @yield the asynchronous operation to perform
  #
  # @!macro executor_and_deref_options
  #
  # @option opts [object, Array] :args zero or more arguments to be passed the task
  #   block on execution
  #
  # @raise [ArgumentError] if no block is given
  def initialize(opts = {}, &block)
    raise ArgumentError.new('no block given') unless block_given?
    # Stash the block in opts so IVar#ns_initialize can pick it up;
    # explicitly pass no block to super (IVar treats a block as a value source).
    super(NULL, opts.merge(__task_from_block__: block), &nil)
  end

  # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and
  # passes the block to a new thread/thread pool for eventual execution.
  # Does nothing if the `Future` is in any state other than `:unscheduled`.
  #
  # @return [Future] a reference to `self`
  #
  # @example Instance and execute in separate steps
  #   future = Concurrent::Future.new{ sleep(1); 42 }
  #   future.state #=> :unscheduled
  #   future.execute
  #   future.state #=> :pending
  #
  # @example Instance and execute in one line
  #   future = Concurrent::Future.new{ sleep(1); 42 }.execute
  #   future.state #=> :pending
  def execute
    if compare_and_set_state(:pending, :unscheduled)
      @executor.post{ safe_execute(@task, @args) }
      self
    end
    # NOTE(review): when the CAS fails (already executed) this returns nil,
    # not self — callers chaining off #execute rely on the first-call path.
  end

  # Create a new `Future` object with the given block, execute it, and return the
  # `:pending` object.
  #
  # @yield the asynchronous operation to perform
  #
  # @!macro executor_and_deref_options
  #
  # @option opts [object, Array] :args zero or more arguments to be passed the task
  #   block on execution
  #
  # @raise [ArgumentError] if no block is given
  #
  # @return [Future] the newly created `Future` in the `:pending` state
  #
  # @example
  #   future = Concurrent::Future.execute{ sleep(1); 42 }
  #   future.state #=> :pending
  def self.execute(opts = {}, &block)
    Future.new(opts, &block).execute
  end

  # @!macro ivar_set_method
  # Assign the task late: only legal while still `:unscheduled`.
  def set(value = NULL, &block)
    check_for_block_or_value!(block_given?, value)
    synchronize do
      if @state != :unscheduled
        raise MultipleAssignmentError
      else
        @task = block || Proc.new { value }
      end
    end
    execute
  end

  # Attempt to cancel the operation if it has not already processed.
  # The operation can only be cancelled while still `pending`. It cannot
  # be cancelled once it has begun processing or has completed.
  #
  # @return [Boolean] was the operation successfully cancelled.
  def cancel
    if compare_and_set_state(:cancelled, :pending)
      complete(false, nil, CancelledOperationError.new)
      true
    else
      false
    end
  end

  # Has the operation been successfully cancelled?
  #
  # @return [Boolean]
  def cancelled?
    state == :cancelled
  end

  # Wait the given number of seconds for the operation to complete.
  # On timeout attempt to cancel the operation.
  #
  # @param [Numeric] timeout the maximum time in seconds to wait.
  # @return [Boolean] true if the operation completed before the timeout
  #   else false
  def wait_or_cancel(timeout)
    wait(timeout)
    if complete?
      true
    else
      cancel
      false
    end
  end

  protected

  # @!visibility private
  def ns_initialize(value, opts)
    super
    @state = :unscheduled
    @task = opts[:__task_from_block__]
    @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
    @args = get_arguments_from(opts)
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,52 @@
|
|||||||
|
require 'concurrent/utility/engine'
|
||||||
|
require 'concurrent/thread_safe/util'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# @!macro concurrent_hash
|
||||||
|
#
|
||||||
|
# A thread-safe subclass of Hash. This version locks against the object
|
||||||
|
# itself for every method call, ensuring only one thread can be reading
|
||||||
|
# or writing at a time. This includes iteration methods like `#each`,
|
||||||
|
# which takes the lock repeatedly when reading an item.
|
||||||
|
#
|
||||||
|
# @see http://ruby-doc.org/core/Hash.html Ruby standard library `Hash`
|
||||||
|
|
||||||
|
# @!macro internal_implementation_note
|
||||||
|
# Pick the engine-specific backend for Concurrent::Hash once, at load time.
# @!macro internal_implementation_note
HashImplementation = case
                     when Concurrent.on_cruby?
                       # Hash is not fully thread-safe on CRuby, see
                       # https://bugs.ruby-lang.org/issues/19237
                       # https://github.com/ruby/ruby/commit/ffd52412ab
                       # https://github.com/ruby-concurrency/concurrent-ruby/issues/929
                       # So we will need to add synchronization here (similar to Concurrent::Map).
                       ::Hash

                     when Concurrent.on_jruby?
                       require 'jruby/synchronized'

                       # Hash with every method call synchronized on the instance.
                       class JRubyHash < ::Hash
                         include JRuby::Synchronized
                       end
                       JRubyHash

                     when Concurrent.on_truffleruby?
                       require 'concurrent/thread_safe/util/data_structures'

                       class TruffleRubyHash < ::Hash
                       end

                       # Synchronization is injected into the subclass by the helper below.
                       ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash
                       TruffleRubyHash

                     else
                       warn 'Possibly unsupported Ruby implementation'
                       ::Hash
                     end
private_constant :HashImplementation

# @!macro concurrent_hash
class Hash < HashImplementation
end
|
||||||
|
|
||||||
|
end
|
||||||
@ -0,0 +1,101 @@
|
|||||||
|
require 'concurrent/synchronization/abstract_struct'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# A thread-safe, immutable variation of Ruby's standard `Struct`.
# Instances are read-only: every public method delegates to an `ns_*`
# helper supplied by `Synchronization::AbstractStruct`.
#
# @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct`
module ImmutableStruct
  include Synchronization::AbstractStruct

  # Mark including struct classes as safely initializable (final state
  # visible to all threads after construction).
  def self.included(base)
    base.safe_initialization!
  end

  # @!macro struct_values
  def values
    ns_values
  end

  alias_method :to_a, :values

  # @!macro struct_values_at
  def values_at(*indexes)
    ns_values_at(indexes)
  end

  # @!macro struct_inspect
  def inspect
    ns_inspect
  end

  alias_method :to_s, :inspect

  # @!macro struct_merge
  def merge(other, &block)
    ns_merge(other, &block)
  end

  # @!macro struct_to_h
  def to_h
    ns_to_h
  end

  # @!macro struct_get
  def [](member)
    ns_get(member)
  end

  # @!macro struct_equality
  def ==(other)
    ns_equality(other)
  end

  # @!macro struct_each
  def each(&block)
    return enum_for(:each) unless block_given?
    ns_each(&block)
  end

  # @!macro struct_each_pair
  def each_pair(&block)
    return enum_for(:each_pair) unless block_given?
    ns_each_pair(&block)
  end

  # @!macro struct_select
  def select(&block)
    return enum_for(:select) unless block_given?
    ns_select(&block)
  end

  private

  # @!visibility private
  def initialize_copy(original)
    super(original)
    ns_initialize_copy
  end

  # @!macro struct_new
  # Like Struct.new: an optional leading String names the generated class.
  def self.new(*args, &block)
    clazz_name = nil
    if args.length == 0
      raise ArgumentError.new('wrong number of arguments (0 for 1+)')
    elsif args.length > 0 && args.first.is_a?(String)
      clazz_name = args.shift
    end
    FACTORY.define_struct(clazz_name, args, &block)
  end

  # Lock-guarded factory so concurrent struct-class definition is safe.
  FACTORY = Class.new(Synchronization::LockableObject) do
    def define_struct(name, members, &block)
      synchronize do
        Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block)
      end
    end
  end.new
  private_constant :FACTORY
end
|
||||||
|
end
|
||||||
@ -0,0 +1,208 @@
|
|||||||
|
require 'concurrent/constants'
|
||||||
|
require 'concurrent/errors'
|
||||||
|
require 'concurrent/collection/copy_on_write_observer_set'
|
||||||
|
require 'concurrent/concern/obligation'
|
||||||
|
require 'concurrent/concern/observable'
|
||||||
|
require 'concurrent/executor/safe_task_executor'
|
||||||
|
require 'concurrent/synchronization/lockable_object'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
|
||||||
|
# An `IVar` is like a future that you can assign. As a future is a value that
|
||||||
|
# is being computed that you can wait on, an `IVar` is a value that is waiting
|
||||||
|
# to be assigned, that you can wait on. `IVars` are single assignment and
|
||||||
|
# deterministic.
|
||||||
|
#
|
||||||
|
# Then, express futures as an asynchronous computation that assigns an `IVar`.
|
||||||
|
# The `IVar` becomes the primitive on which [futures](Future) and
|
||||||
|
# [dataflow](Dataflow) are built.
|
||||||
|
#
|
||||||
|
# An `IVar` is a single-element container that is normally created empty, and
|
||||||
|
# can only be set once. The I in `IVar` stands for immutable. Reading an
|
||||||
|
# `IVar` normally blocks until it is set. It is safe to set and read an `IVar`
|
||||||
|
# from different threads.
|
||||||
|
#
|
||||||
|
# If you want to have some parallel task set the value in an `IVar`, you want
|
||||||
|
# a `Future`. If you want to create a graph of parallel tasks all executed
|
||||||
|
# when the values they depend on are ready you want `dataflow`. `IVar` is
|
||||||
|
# generally a low-level primitive.
|
||||||
|
#
|
||||||
|
# ## Examples
|
||||||
|
#
|
||||||
|
# Create, set and get an `IVar`
|
||||||
|
#
|
||||||
|
# ```ruby
|
||||||
|
# ivar = Concurrent::IVar.new
|
||||||
|
# ivar.set 14
|
||||||
|
# ivar.value #=> 14
|
||||||
|
# ivar.set 2 # would now be an error
|
||||||
|
# ```
|
||||||
|
#
|
||||||
|
# ## See Also
|
||||||
|
#
|
||||||
|
# 1. For the theory: Arvind, R. Nikhil, and K. Pingali.
|
||||||
|
# [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562).
|
||||||
|
# In Proceedings of Workshop on Graph Reduction, 1986.
|
||||||
|
# 2. For recent application:
|
||||||
|
# [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html).
|
||||||
|
class IVar < Synchronization::LockableObject
  include Concern::Obligation
  include Concern::Observable

  # Create a new `IVar` in the `:pending` state with the (optional) initial value.
  #
  # @param [Object] value the initial value
  # @param [Hash] opts the options to create a message with
  # @option opts [String] :dup_on_deref (false) call `#dup` before returning
  #   the data
  # @option opts [String] :freeze_on_deref (false) call `#freeze` before
  #   returning the data
  # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing
  #   the internal value and returning the value returned from the proc
  def initialize(value = NULL, opts = {}, &block)
    if value != NULL && block_given?
      raise ArgumentError.new('provide only a value or a block')
    end
    super(&nil)
    synchronize { ns_initialize(value, opts, &block) }
  end

  # Add an observer on this object that will receive notification on update.
  #
  # Upon completion the `IVar` will notify all observers in a thread-safe way.
  # The `func` method of the observer will be called with three arguments: the
  # `Time` at which the `Future` completed the asynchronous operation, the
  # final `value` (or `nil` on rejection), and the final `reason` (or `nil` on
  # fulfillment).
  #
  # @param [Object] observer the object that will be notified of changes
  # @param [Symbol] func symbol naming the method to call when this
  #   `Observable` has changes`
  def add_observer(observer = nil, func = :update, &block)
    raise ArgumentError.new('cannot provide both an observer and a block') if observer && block
    direct_notification = false

    if block
      observer = block
      func = :call
    end

    synchronize do
      if event.set?
        # Already completed: notify this observer immediately (outside the
        # lock, below) instead of registering it.
        direct_notification = true
      else
        observers.add_observer(observer, func)
      end
    end

    observer.send(func, Time.now, self.value, reason) if direct_notification
    observer
  end

  # @!macro ivar_set_method
  #   Set the `IVar` to a value and wake or notify all threads waiting on it.
  #
  #   @!macro ivar_set_parameters_and_exceptions
  #     @param [Object] value the value to store in the `IVar`
  #     @yield A block operation to use for setting the value
  #     @raise [ArgumentError] if both a value and a block are given
  #     @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already
  #       been set or otherwise completed
  #
  #   @return [IVar] self
  def set(value = NULL)
    check_for_block_or_value!(block_given?, value)
    # The CAS to :processing is the single-assignment gate.
    raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending)

    begin
      value = yield if block_given?
      complete_without_notification(true, value, nil)
    rescue => ex
      # A raising block rejects the IVar instead of propagating.
      complete_without_notification(false, nil, ex)
    end

    notify_observers(self.value, reason)
    self
  end

  # @!macro ivar_fail_method
  #   Set the `IVar` to failed due to some error and wake or notify all threads waiting on it.
  #
  #   @param [Object] reason for the failure
  #   @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already
  #     been set or otherwise completed
  #   @return [IVar] self
  def fail(reason = StandardError.new)
    complete(false, nil, reason)
  end

  # Attempt to set the `IVar` with the given value or block. Return a
  # boolean indicating the success or failure of the set operation.
  #
  # @!macro ivar_set_parameters_and_exceptions
  #
  # @return [Boolean] true if the value was set else false
  def try_set(value = NULL, &block)
    set(value, &block)
    true
  rescue MultipleAssignmentError
    false
  end

  protected

  # @!visibility private
  def ns_initialize(value, opts)
    value = yield if block_given?
    init_obligation
    self.observers = Collection::CopyOnWriteObserverSet.new
    set_deref_options(opts)

    @state = :pending
    # An explicit initial value completes the IVar at construction time.
    if value != NULL
      ns_complete_without_notification(true, value, nil)
    end
  end

  # @!visibility private
  def safe_execute(task, args = [])
    if compare_and_set_state(:processing, :pending)
      # NOTE(review): executes with @args, not the `args` parameter —
      # presumably intentional (matches upstream concurrent-ruby); confirm
      # before relying on the `args` parameter here.
      success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args)
      complete(success, val, reason)
      yield(success, val, reason) if block_given?
    end
  end

  # @!visibility private
  def complete(success, value, reason)
    complete_without_notification(success, value, reason)
    notify_observers(self.value, reason)
    self
  end

  # @!visibility private
  def complete_without_notification(success, value, reason)
    synchronize { ns_complete_without_notification(success, value, reason) }
    self
  end

  # @!visibility private
  def notify_observers(value, reason)
    observers.notify_and_delete_observers{ [Time.now, value, reason] }
  end

  # @!visibility private
  def ns_complete_without_notification(success, value, reason)
    raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state
    set_state(success, value, reason)
    event.set
  end

  # Exactly one of value/block must be supplied.
  # @!visibility private
  def check_for_block_or_value!(block_given, value) # :nodoc:
    if (block_given && value != NULL) || (! block_given && value == NULL)
      raise ArgumentError.new('must set with either a value or a block')
    end
  end
end
|
||||||
|
end
|
||||||
@ -0,0 +1,350 @@
|
|||||||
|
require 'thread'
|
||||||
|
require 'concurrent/constants'
|
||||||
|
require 'concurrent/utility/engine'
|
||||||
|
|
||||||
|
module Concurrent
|
||||||
|
# @!visibility private
|
||||||
|
module Collection
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
MapImplementation = case
|
||||||
|
when Concurrent.on_jruby?
|
||||||
|
require 'concurrent/utility/native_extension_loader'
|
||||||
|
# noinspection RubyResolve
|
||||||
|
JRubyMapBackend
|
||||||
|
when Concurrent.on_cruby?
|
||||||
|
require 'concurrent/collection/map/mri_map_backend'
|
||||||
|
MriMapBackend
|
||||||
|
when Concurrent.on_truffleruby?
|
||||||
|
if defined?(::TruffleRuby::ConcurrentMap)
|
||||||
|
require 'concurrent/collection/map/truffleruby_map_backend'
|
||||||
|
TruffleRubyMapBackend
|
||||||
|
else
|
||||||
|
require 'concurrent/collection/map/synchronized_map_backend'
|
||||||
|
SynchronizedMapBackend
|
||||||
|
end
|
||||||
|
else
|
||||||
|
warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation'
|
||||||
|
require 'concurrent/collection/map/synchronized_map_backend'
|
||||||
|
SynchronizedMapBackend
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# `Concurrent::Map` is a hash-like object and should have much better performance
|
||||||
|
# characteristics, especially under high concurrency, than `Concurrent::Hash`.
|
||||||
|
# However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash`
|
||||||
|
# -- for instance, it does not necessarily retain ordering by insertion time as `Hash`
|
||||||
|
# does. For most uses it should do fine though, and we recommend you consider
|
||||||
|
# `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs.
|
||||||
|
class Map < Collection::MapImplementation
|
||||||
|
|
||||||
|
# @!macro map.atomic_method
|
||||||
|
# This method is atomic.
|
||||||
|
|
||||||
|
# @!macro map.atomic_method_with_block
|
||||||
|
# This method is atomic.
|
||||||
|
# @note Atomic methods taking a block do not allow the `self` instance
|
||||||
|
# to be used within the block. Doing so will cause a deadlock.
|
||||||
|
|
||||||
|
# @!method []=(key, value)
|
||||||
|
# Set a value with key
|
||||||
|
# @param [Object] key
|
||||||
|
# @param [Object] value
|
||||||
|
# @return [Object] the new value
|
||||||
|
|
||||||
|
# @!method compute_if_absent(key)
|
||||||
|
# Compute and store new value for key if the key is absent.
|
||||||
|
# @param [Object] key
|
||||||
|
# @yield new value
|
||||||
|
# @yieldreturn [Object] new value
|
||||||
|
# @return [Object] new value or current value
|
||||||
|
# @!macro map.atomic_method_with_block
|
||||||
|
|
||||||
|
# @!method compute_if_present(key)
|
||||||
|
# Compute and store new value for key if the key is present.
|
||||||
|
# @param [Object] key
|
||||||
|
# @yield new value
|
||||||
|
# @yieldparam old_value [Object]
|
||||||
|
# @yieldreturn [Object, nil] new value, when nil the key is removed
|
||||||
|
# @return [Object, nil] new value or nil
|
||||||
|
# @!macro map.atomic_method_with_block
|
||||||
|
|
||||||
|
# @!method compute(key)
|
||||||
|
# Compute and store new value for key.
|
||||||
|
# @param [Object] key
|
||||||
|
# @yield compute new value from old one
|
||||||
|
# @yieldparam old_value [Object, nil] old_value, or nil when key is absent
|
||||||
|
# @yieldreturn [Object, nil] new value, when nil the key is removed
|
||||||
|
# @return [Object, nil] new value or nil
|
||||||
|
# @!macro map.atomic_method_with_block
|
||||||
|
|
||||||
|
# @!method merge_pair(key, value)
|
||||||
|
# If the key is absent, the value is stored, otherwise new value is
|
||||||
|
# computed with a block.
|
||||||
|
# @param [Object] key
|
||||||
|
# @param [Object] value
|
||||||
|
# @yield compute new value from old one
|
||||||
|
# @yieldparam old_value [Object] old value
|
||||||
|
# @yieldreturn [Object, nil] new value, when nil the key is removed
|
||||||
|
# @return [Object, nil] new value or nil
|
||||||
|
# @!macro map.atomic_method_with_block
|
||||||
|
|
||||||
|
# @!method replace_pair(key, old_value, new_value)
|
||||||
|
# Replaces old_value with new_value if key exists and current value
|
||||||
|
# matches old_value
|
||||||
|
# @param [Object] key
|
||||||
|
# @param [Object] old_value
|
||||||
|
# @param [Object] new_value
|
||||||
|
# @return [true, false] true if replaced
|
||||||
|
# @!macro map.atomic_method
|
||||||
|
|
||||||
|
# @!method replace_if_exists(key, new_value)
|
||||||
|
# Replaces current value with new_value if key exists
|
||||||
|
# @param [Object] key
|
||||||
|
# @param [Object] new_value
|
||||||
|
# @return [Object, nil] old value or nil
|
||||||
|
# @!macro map.atomic_method
|
||||||
|
|
||||||
|
# @!method get_and_set(key, value)
|
||||||
|
# Get the current value under key and set new value.
|
||||||
|
# @param [Object] key
|
||||||
|
# @param [Object] value
|
||||||
|
# @return [Object, nil] old value or nil when the key was absent
|
||||||
|
# @!macro map.atomic_method
|
||||||
|
|
||||||
|
# @!method delete(key)
|
||||||
|
# Delete key and its value.
|
||||||
|
# @param [Object] key
|
||||||
|
# @return [Object, nil] old value or nil when the key was absent
|
||||||
|
# @!macro map.atomic_method
|
||||||
|
|
||||||
|
# @!method delete_pair(key, value)
|
||||||
|
# Delete pair and its value if current value equals the provided value.
|
||||||
|
# @param [Object] key
|
||||||
|
# @param [Object] value
|
||||||
|
# @return [true, false] true if deleted
|
||||||
|
# @!macro map.atomic_method
|
||||||
|
|
||||||
|
# NonConcurrentMapBackend handles default_proc natively
unless defined?(Collection::NonConcurrentMapBackend) and self < Collection::NonConcurrentMapBackend

  # @param [Hash, nil] options options to set the :initial_capacity or :load_factor. Ignored on some Rubies.
  # @param [Proc] default_proc Optional block to compute the default value if the key is not set, like `Hash#default_proc`
  def initialize(options = nil, &default_proc)
    if options.kind_of?(::Hash)
      validate_options_hash!(options)
    else
      # Non-hash options are silently discarded.
      options = nil
    end

    super(options)
    @default_proc = default_proc
  end

  # Get a value with key
  # @param [Object] key
  # @return [Object] the value
  def [](key)
    if value = super # non-falsy value is an existing mapping, return it right away
      value
    # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call
    # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value
    # would be returned)
    # note: nil == value check is not technically necessary
    elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL))
      @default_proc.call(self, key)
    else
      value
    end
  end
end
|
||||||
|
|
||||||
|
alias_method :get, :[]
|
||||||
|
alias_method :put, :[]=
|
||||||
|
|
||||||
|
# Look up +key+, falling back to a block, then to +default_value+.
#
# @param [Object] key the key to look up
# @param [Object] default_value returned when the key is absent and no block is given
# @yield computes the fallback for an absent key (takes precedence over +default_value+)
# @yieldparam key [Object]
# @yieldreturn [Object] default value
# @return [Object] the stored value, the block's result, or +default_value+
# @raise [KeyError] when the key is absent and neither a block nor a default is given
#
# @note The "fetch-then-act" methods of `Map` are not atomic: another thread
#   may modify the map between the lookup and any action taken on the result.
#   `Map` is a concurrency primitive with strong happens-before guarantees,
#   not a high-level abstraction for complex compound operations. This method
#   also does not support recursion, due to internal constraints.
def fetch(key, default_value = NULL)
  value = get_or_default(key, NULL)
  return value unless NULL == value

  if block_given?
    yield key
  elsif NULL == default_value
    raise_fetch_no_key
  else
    default_value
  end
end
|
||||||
|
|
||||||
|
# Fetch the value for +key+, or store and return a default when absent.
# The lookup and the store are two separate steps, therefore not atomic:
# the store can overwrite another concurrently stored value.
#
# @param [Object] key
# @param [Object] default_value stored when the key is absent and no block is given
# @yield computes the value to store for an absent key
# @yieldparam key [Object]
# @yieldreturn [Object] default value
# @return [Object] the existing or newly stored value
# @raise [KeyError] when the key is absent and neither a block nor a default is given
def fetch_or_store(key, default_value = NULL)
  fetch(key) do
    if block_given?
      put(key, yield(key))
    elsif NULL == default_value
      raise_fetch_no_key
    else
      put(key, default_value)
    end
  end
end
|
||||||
|
|
||||||
|
# Insert value into map with key if key is absent, in one atomic step.
#
# @param [Object] key
# @param [Object] value
# @return [Object, nil] the previous value when key was present, or nil when there was no key
def put_if_absent(key, value)
  inserted = false
  current = compute_if_absent(key) do
    # Only runs when the key was absent; records that we stored our value.
    inserted = true
    value
  end
  inserted ? nil : current
end unless method_defined?(:put_if_absent)
|
||||||
|
|
||||||
|
# Is the value stored in the map? Iterates over all values and compares
# by object identity (`equal?`).
#
# @param [Object] value
# @return [true, false]
def value?(value)
  each_value do |candidate|
    return true if value.equal?(candidate)
  end
  false
end
|
||||||
|
|
||||||
|
# All keys
#
# @return [::Array<Object>] keys
def keys
  collected = []
  each_pair { |key, _value| collected << key }
  collected
end unless method_defined?(:keys)
|
||||||
|
|
||||||
|
# All values
#
# @return [::Array<Object>] values
def values
  collected = []
  each_pair { |_key, value| collected << value }
  collected
end unless method_defined?(:values)
|
||||||
|
|
||||||
|
# Iterates over each key.
#
# @yield for each key in the map
# @yieldparam key [Object]
# @return [self]
def each_key
  each_pair { |key, _value| yield key }
end unless method_defined?(:each_key)
|
||||||
|
|
||||||
|
# Iterates over each value.
#
# @yield for each value in the map
# @yieldparam value [Object]
# @return [self]
def each_value
  each_pair { |_key, value| yield value }
end unless method_defined?(:each_value)
|
||||||
|
|
||||||
|
# Iterates over each key value pair.
#
# @yield for each key value pair in the map
# @yieldparam key [Object]
# @yieldparam value [Object]
# @return [self] when a block is given; an Enumerator otherwise
def each_pair
  if block_given?
    # Delegate to the backend implementation, forwarding the block.
    super
  else
    enum_for(:each_pair)
  end
end
|
||||||
|
|
||||||
|
alias_method :each, :each_pair unless method_defined?(:each)
|
||||||
|
|
||||||
|
# Find key of a value (compared with `==`).
#
# @param [Object] value
# @return [Object, nil] key or nil when not found
def key(value)
  each_pair do |candidate_key, candidate_value|
    return candidate_key if candidate_value == value
  end
  nil
end unless method_defined?(:key)
|
||||||
|
|
||||||
|
# Is map empty?
#
# @return [true, false]
def empty?
  # Bail out on the first pair seen; an empty map never yields.
  each_pair { |_key, _value| return false }
  true
end unless method_defined?(:empty?)
|
||||||
|
|
||||||
|
# The size of map.
#
# @return [Integer] size
def size
  total = 0
  each_pair { |_key, _value| total += 1 }
  total
end unless method_defined?(:size)
|
||||||
|
|
||||||
|
# Serializes the map's contents into a plain Hash for Marshal.
# @raise [TypeError] when the map has a default proc (procs can't be dumped)
# @!visibility private
def marshal_dump
  raise TypeError, "can't dump hash with default proc" if @default_proc

  dumped = {}
  each_pair { |key, value| dumped[key] = value }
  dumped
end
|
||||||
|
|
||||||
|
# Restores the map from a plain Hash produced by {#marshal_dump}:
# re-initializes the backend, then copies every pair back in.
# @!visibility private
def marshal_load(dumped_hash)
  initialize
  populate_from(dumped_hash)
end
|
||||||
|
|
||||||
|
undef :freeze
|
||||||
|
|
||||||
|
# @!visibility private
|
||||||
|
def inspect
|
||||||
|
format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect
|
||||||
|
end
|
||||||
|
|
||||||
|
private
|
||||||
|
|
||||||
|
# Shared failure path for fetch-style lookups on a missing key.
# @raise [KeyError] always
def raise_fetch_no_key
  raise KeyError, 'key not found'
end
|
||||||
|
|
||||||
|
# Called by dup/clone: let the backend copy itself, then copy all pairs
# from the source map so the new instance shares no state with it.
def initialize_copy(other)
  super
  populate_from(other)
end
|
||||||
|
|
||||||
|
# Copies every key/value pair from +source+ into this map via []=.
# @param [#each_pair] source
# @return [self]
def populate_from(source)
  source.each_pair do |key, value|
    self[key] = value
  end
  self
end
|
||||||
|
|
||||||
|
# Validates constructor options, raising on malformed values.
# @param [Hash] options may contain :initial_capacity (non-negative Integer)
#   and :load_factor (Numeric in (0, 1])
# @raise [ArgumentError] when either option is present but invalid
def validate_options_hash!(options)
  initial_capacity = options[:initial_capacity]
  if initial_capacity && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0)
    raise ArgumentError, ":initial_capacity must be a positive Integer"
  end

  load_factor = options[:load_factor]
  if load_factor && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1)
    raise ArgumentError, ":load_factor must be a number between 0 and 1"
  end
end
|
||||||
|
end
|
||||||
|
end
|
||||||
@ -0,0 +1,229 @@
|
|||||||
|
require 'concurrent/synchronization/object'

module Concurrent

  # A `Maybe` encapsulates an optional value. A `Maybe` either contains a
  # value (represented as `Just`) or it is empty (represented as `Nothing`).
  # It is a principled alternative to signalling failure with `nil` or with
  # exceptions: a `Nothing` carries the reason for the missing value, while a
  # `Just` may legitimately wrap `nil` without ambiguity.
  #
  # For compatibility with {Concurrent::Concern::Obligation} the predicate and
  # accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and
  # `reason`.
  #
  # @example Returning a Maybe from a function
  #   module MyFileUtils
  #     def self.consult(path)
  #       file = File.open(path, 'r')
  #       Concurrent::Maybe.just(file.read)
  #     rescue => ex
  #       return Concurrent::Maybe.nothing(ex)
  #     ensure
  #       file.close if file
  #     end
  #   end
  #
  #   maybe = MyFileUtils.consult('bogus.file')
  #   maybe.just?    #=> false
  #   maybe.nothing? #=> true
  #   maybe.reason   #=> #<Errno::ENOENT: No such file or directory @ rb_sysopen - bogus.file>
  #
  # @example Using Maybe with a block
  #   result = Concurrent::Maybe.from do
  #     Client.find(10) # Client is an ActiveRecord model
  #   end
  #   result.just?  #=> true when the record was found
  #   result.reason #=> the raised exception otherwise
  #
  # @example Using Maybe with the Null Object pattern
  #   # In a Rails controller...
  #   result = ClientService.new(10).find # returns a Maybe
  #   render json: result.or(NullClient.new)
  #
  # @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe
  # @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe
  class Maybe < Synchronization::Object
    include Comparable
    safe_initialization!

    # Sentinel indicating that the given attribute has not been set:
    # {#nothing} returns `NONE` when `Just`; {#just} returns `NONE` when `Nothing`.
    NONE = ::Object.new.freeze

    # The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`.
    attr_reader :just

    # The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`.
    attr_reader :nothing

    private_class_method :new

    # Create a new `Maybe` using the given block.
    #
    # Calls the block, passing all function arguments through as block
    # arguments. A normal return produces a `Just` wrapping the block's
    # result; a raised exception produces a `Nothing` whose reason is that
    # exception.
    #
    # @param [Array<Object>] args zero or more arguments to pass to the block
    # @yield the block from which to create a new `Maybe`
    # @yieldparam [Array<Object>] args zero or more block arguments
    # @return [Maybe] the newly created object
    # @raise [ArgumentError] when no block is given
    def self.from(*args)
      raise ArgumentError.new('no block given') unless block_given?
      begin
        new(yield(*args), NONE)
      rescue => ex
        new(NONE, ex)
      end
    end

    # Create a new `Just` with the given value.
    #
    # @param [Object] value the value to set for the new `Maybe` object
    # @return [Maybe] the newly created object
    def self.just(value)
      new(value, NONE)
    end

    # Create a new `Nothing` with the given (optional) reason.
    #
    # @param [Exception, Object] error the reason for the new `Maybe` object;
    #   a non-`Exception` argument is converted to a `StandardError` whose
    #   message is `error.to_s` (so no argument yields an empty message)
    # @return [Maybe] the newly created object
    def self.nothing(error = '')
      reason = error.is_a?(Exception) ? error : StandardError.new(error.to_s)
      new(NONE, reason)
    end

    # Is this `Maybe` a `Just` (successfully fulfilled with a value)?
    #
    # @return [Boolean] true if `Just`, false if `Nothing`
    def just?
      !nothing?
    end
    alias_method :fulfilled?, :just?

    # Is this `Maybe` a `Nothing` (rejected with an exception upon fulfillment)?
    #
    # @return [Boolean] true if `Nothing`, false if `Just`
    def nothing?
      NONE != @nothing
    end
    alias_method :rejected?, :nothing?

    alias_method :value, :just

    alias_method :reason, :nothing

    # Comparison operator.
    #
    # @return [Integer] 0 if self and other are both `Nothing`;
    #   -1 if self is `Nothing` and other is `Just`;
    #   1 if self is `Just` and other is `Nothing`;
    #   `self.just <=> other.just` if both self and other are `Just`
    def <=>(other)
      if nothing?
        other.nothing? ? 0 : -1
      elsif other.nothing?
        1
      else
        just <=> other.just
      end
    end

    # Return either the value of self or the given default value.
    #
    # @return [Object] the value of self when `Just`; else the given default
    def or(other)
      just? ? just : other
    end

    private

    # Create a new `Maybe` with the given attributes.
    #
    # @param [Object] just the value when `Just`, else `NONE`
    # @param [Exception, Object] nothing the reason when `Nothing`, else `NONE`
    # @!visibility private
    def initialize(just, nothing)
      @just = just
      @nothing = nothing
    end
  end
end
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user