Vendor concurrent-ruby 1.1.3.

Mike McQuaid 2018-12-05 16:32:24 +00:00
parent 2110641b4c
commit 9347c0272a
No known key found for this signature in database
GPG Key ID: 48A898132FD8EE70
126 changed files with 15964 additions and 405 deletions

View File

@@ -3,7 +3,7 @@ require 'rbconfig'
ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
ruby_version = RbConfig::CONFIG["ruby_version"]
path = File.expand_path('..', __FILE__)
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.0.5/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.3/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/i18n-1.1.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.11.3/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thread_safe-0.3.6/lib"
@@ -21,6 +21,6 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/powerpack-0.1.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rainbow-3.0.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-progressbar-1.10.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unicode-display_width-1.4.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-0.60.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-0.61.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-rspec-1.30.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-macho-2.1.0/lib"

View File

@@ -1,240 +0,0 @@
require 'thread'
require 'concurrent/constants'
require 'concurrent/synchronization'
module Concurrent
# @!visibility private
module Collection
# @!visibility private
MapImplementation = if Concurrent.java_extensions_loaded?
# noinspection RubyResolve
JRubyMapBackend
elsif defined?(RUBY_ENGINE)
case RUBY_ENGINE
when 'ruby'
require 'concurrent/collection/map/mri_map_backend'
MriMapBackend
when 'rbx'
require 'concurrent/collection/map/atomic_reference_map_backend'
AtomicReferenceMapBackend
when 'jruby+truffle'
require 'concurrent/collection/map/atomic_reference_map_backend'
AtomicReferenceMapBackend
else
warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' if $VERBOSE
require 'concurrent/collection/map/synchronized_map_backend'
SynchronizedMapBackend
end
else
MriMapBackend
end
end
# `Concurrent::Map` is a hash-like object and should have much better performance
# characteristics, especially under high concurrency, than `Concurrent::Hash`.
# However, `Concurrent::Map` is not strictly semantically equivalent to a Ruby `Hash`
# -- for instance, it does not necessarily retain ordering by insertion time as `Hash`
# does. For most uses it should do fine though, and we recommend you consider
# `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs.
#
# > require 'concurrent'
# >
# > map = Concurrent::Map.new
class Map < Collection::MapImplementation
# @!macro [new] map_method_is_atomic
# This method is atomic. Atomic methods of `Map` which accept a block
# do not allow the `self` instance to be used within the block. Doing
# so will cause a deadlock.
# @!method put_if_absent
# @!macro map_method_is_atomic
# @!method compute_if_absent
# @!macro map_method_is_atomic
# @!method compute_if_present
# @!macro map_method_is_atomic
# @!method compute
# @!macro map_method_is_atomic
# @!method merge_pair
# @!macro map_method_is_atomic
# @!method replace_pair
# @!macro map_method_is_atomic
# @!method replace_if_exists
# @!macro map_method_is_atomic
# @!method get_and_set
# @!macro map_method_is_atomic
# @!method delete
# @!macro map_method_is_atomic
# @!method delete_pair
# @!macro map_method_is_atomic
def initialize(options = nil, &block)
if options.kind_of?(::Hash)
validate_options_hash!(options)
else
options = nil
end
super(options)
@default_proc = block
end
def [](key)
if value = super # non-falsy value is an existing mapping, return it right away
value
# the re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition: by the time the current thread gets to the key?(key) call
# a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken, and an incorrect +nil+ value
# would be returned)
# note: nil == value check is not technically necessary
elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL))
@default_proc.call(self, key)
else
value
end
end
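# Editor's note, a hedged illustration (not upstream code) of the default-proc
# path above: the proc's result is returned but, unlike `Hash`, it is only
# stored if the proc itself writes to the map.
#
# map = Concurrent::Map.new { |m, key| key.to_s }
# map[:a] #=> "a"
# map.key?(:a) #=> false (nothing was stored)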
alias_method :get, :[]
alias_method :put, :[]=
# @!macro [attach] map_method_not_atomic
# The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended
# to be use as a concurrency primitive with strong happens-before
# guarantees. It is not intended to be used as a high-level abstraction
# supporting complex operations. All read and write operations are
# thread safe, but no guarantees are made regarding race conditions
# between the fetch operation and yielding to the block. Additionally,
# this method does not support recursion. This is due to internal
# constraints that are very unlikely to change in the near future.
def fetch(key, default_value = NULL)
if NULL != (value = get_or_default(key, NULL))
value
elsif block_given?
yield key
elsif NULL != default_value
default_value
else
raise_fetch_no_key
end
end
# @!macro map_method_not_atomic
def fetch_or_store(key, default_value = NULL)
fetch(key) do
put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value))
end
end
# @!macro map_method_is_atomic
def put_if_absent(key, value)
computed = false
result = compute_if_absent(key) do
computed = true
value
end
computed ? nil : result
end unless method_defined?(:put_if_absent)
def value?(value)
each_value do |v|
return true if value.equal?(v)
end
false
end
def keys
arr = []
each_pair {|k, v| arr << k}
arr
end unless method_defined?(:keys)
def values
arr = []
each_pair {|k, v| arr << v}
arr
end unless method_defined?(:values)
def each_key
each_pair {|k, v| yield k}
end unless method_defined?(:each_key)
def each_value
each_pair {|k, v| yield v}
end unless method_defined?(:each_value)
alias_method :each, :each_pair unless method_defined?(:each)
def key(value)
each_pair {|k, v| return k if v == value}
nil
end unless method_defined?(:key)
alias_method :index, :key if RUBY_VERSION < '1.9'
def empty?
each_pair {|k, v| return false}
true
end unless method_defined?(:empty?)
def size
count = 0
each_pair {|k, v| count += 1}
count
end unless method_defined?(:size)
def marshal_dump
raise TypeError, "can't dump hash with default proc" if @default_proc
h = {}
each_pair {|k, v| h[k] = v}
h
end
def marshal_load(hash)
initialize
populate_from(hash)
end
undef :freeze
# @!visibility private
DEFAULT_OBJ_ID_STR_WIDTH = 0.size == 4 ? 7 : 14 # we want to look "native", 7 for 32-bit, 14 for 64-bit
# override default #inspect() method: firstly, we don't want to be spilling our guts (i-vars), secondly, MRI backend's
# #inspect() call on its @backend i-var will bump @backend's iter level while possibly yielding GVL
def inspect
id_str = (object_id << 1).to_s(16).rjust(DEFAULT_OBJ_ID_STR_WIDTH, '0')
"#<#{self.class.name}:0x#{id_str} entries=#{size} default_proc=#{@default_proc.inspect}>"
end
private
def raise_fetch_no_key
raise KeyError, 'key not found'
end
def initialize_copy(other)
super
populate_from(other)
end
def populate_from(hash)
hash.each_pair {|k, v| self[k] = v}
self
end
def validate_options_hash!(options)
if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0)
raise ArgumentError, ":initial_capacity must be a positive Integer"
end
if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1)
raise ArgumentError, ":load_factor must be a number between 0 and 1"
end
end
end
end
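A hedged usage sketch (editor's addition, not part of the vendored file) exercising the atomic and "fetch-then-act" operations documented above; the keys and values are illustrative only.

require 'concurrent'
map = Concurrent::Map.new
map.put_if_absent(:a, 1)        #=> nil (value was stored)
map.put_if_absent(:a, 2)        #=> 1 (existing value returned, not replaced)
map.compute_if_absent(:b) { 2 } #=> 2 (atomic; the block runs at most once per key)
map.fetch(:missing, :none)      #=> :none
map.fetch_or_store(:c) { 3 }    #=> 3 ("fetch-then-act", not atomic)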

View File

@@ -1,9 +0,0 @@
module Concurrent
module Synchronization
class TruffleLockableObject < AbstractLockableObject
def new(*)
raise NotImplementedError
end
end
end
end

View File

@@ -1,31 +0,0 @@
module Concurrent
module Synchronization
module TruffleAttrVolatile
def self.included(base)
base.extend(ClassMethods)
end
module ClassMethods
def attr_volatile(*names)
# TODO may not always be available
attr_atomic(*names)
end
end
def full_memory_barrier
Truffle::System.full_memory_barrier
end
end
# @!visibility private
# @!macro internal_implementation_note
class TruffleObject < AbstractObject
include TruffleAttrVolatile
def initialize
# nothing to do
end
end
end
end

View File

@@ -1,73 +0,0 @@
require 'concurrent/utility/engine'
module Concurrent
module Utility
# @!visibility private
module NativeExtensionLoader
def allow_c_extensions?
Concurrent.on_cruby?
end
def c_extensions_loaded?
@c_extensions_loaded ||= false
end
def java_extensions_loaded?
@java_extensions_loaded ||= false
end
def set_c_extensions_loaded
@c_extensions_loaded = true
end
def set_java_extensions_loaded
@java_extensions_loaded = true
end
def load_native_extensions
unless defined? Synchronization::AbstractObject
raise 'native_extension_loader loaded before Synchronization::AbstractObject'
end
if Concurrent.on_cruby? && !c_extensions_loaded?
tries = [
lambda do
require 'concurrent/extension'
set_c_extensions_loaded
end,
lambda do
# may be a Windows cross-compiled native gem
require "concurrent/#{RUBY_VERSION[0..2]}/extension"
set_c_extensions_loaded
end]
tries.each do |try|
begin
try.call
break
rescue LoadError
next
end
end
end
if Concurrent.on_jruby? && !java_extensions_loaded?
begin
require 'concurrent_ruby_ext'
set_java_extensions_loaded
rescue LoadError
# Java extensions are required on JRuby; fail loudly rather than fall back to pure Ruby
raise 'On JRuby but Java extensions failed to load.'
end
end
end
end
end
# @!visibility private
extend Utility::NativeExtensionLoader
end
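A hedged sketch (editor's addition) of how the loader flags defined above are typically consulted; exact return values depend on the running engine.

require 'concurrent'
Concurrent.allow_c_extensions?  #=> true only on CRuby
Concurrent.c_extensions_loaded? #=> whether a compiled extension was successfully required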

View File

@@ -0,0 +1 @@
require_relative "./concurrent"

View File

@@ -0,0 +1,134 @@
require 'concurrent/version'
require 'concurrent/constants'
require 'concurrent/errors'
require 'concurrent/configuration'
require 'concurrent/atomics'
require 'concurrent/executors'
require 'concurrent/synchronization'
require 'concurrent/atomic/atomic_markable_reference'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/agent'
require 'concurrent/atom'
require 'concurrent/array'
require 'concurrent/hash'
require 'concurrent/set'
require 'concurrent/map'
require 'concurrent/tuple'
require 'concurrent/async'
require 'concurrent/dataflow'
require 'concurrent/delay'
require 'concurrent/exchanger'
require 'concurrent/future'
require 'concurrent/immutable_struct'
require 'concurrent/ivar'
require 'concurrent/maybe'
require 'concurrent/mutable_struct'
require 'concurrent/mvar'
require 'concurrent/promise'
require 'concurrent/scheduled_task'
require 'concurrent/settable_struct'
require 'concurrent/timer_task'
require 'concurrent/tvar'
require 'concurrent/promises'
require 'concurrent/thread_safe/synchronized_delegator'
require 'concurrent/thread_safe/util'
require 'concurrent/options'
# @!macro internal_implementation_note
#
# @note **Private Implementation:** This abstraction is a private, internal
# implementation detail. It should never be used directly.
# @!macro monotonic_clock_warning
#
# @note Time calculations on all platforms and languages are sensitive to
# changes to the system clock. To alleviate the potential problems
# associated with changing the system clock while an application is running,
# most modern operating systems provide a monotonic clock that operates
# independently of the system clock. A monotonic clock cannot be used to
# determine human-friendly clock times. A monotonic clock is used exclusively
# for calculating time intervals. Not all Ruby platforms provide access to an
# operating system monotonic clock. On these platforms a pure-Ruby monotonic
# clock will be used as a fallback. An operating system monotonic clock is both
# faster and more reliable than the pure-Ruby implementation. The pure-Ruby
# implementation should be fast and reliable enough for most non-realtime
# operations. At this time the common Ruby platforms that provide access to an
# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions).
#
# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3)
# @!macro copy_options
#
# ## Copy Options
#
# Object references in Ruby are mutable. This can lead to serious
# problems when the {#value} of an object is a mutable reference, which
# is always the case unless the value is a `Fixnum`, `Symbol`, or similar
# "primitive" data type. Each instance can be configured with a few
# options that can help protect the program from potentially dangerous
# operations. Each of these options can be optionally set when the object
# instance is created:
#
# * `:dup_on_deref` When true the object will call the `#dup` method on
# the `value` object every time the `#value` method is called
# (default: false)
# * `:freeze_on_deref` When true the object will call the `#freeze`
# method on the `value` object every time the `#value` method is called
# (default: false)
# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run
# every time the `#value` method is called. The `Proc` will be given
# the current `value` as its only argument and the result returned by
# the block will be the return value of the `#value` call. When `nil`
# this option will be ignored (default: nil)
#
# When multiple deref options are set the order of operations is strictly defined.
# The order of deref operations is:
# * `:copy_on_deref`
# * `:dup_on_deref`
# * `:freeze_on_deref`
#
# Because of this ordering there is no need to `#freeze` an object created by a
# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`.
# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is
# as close to the behavior of a "pure" functional language (like Erlang, Clojure,
# or Haskell) as we are likely to get in Ruby.
# @!macro deref_options
#
# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before
# returning the data from {#value}
# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before
# returning the data from {#value}
# @option opts [Proc] :copy_on_deref (nil) When calling the {#value}
# method, call the given proc passing the internal value as the sole
# argument then return the new value returned from the proc.
# @!macro executor_and_deref_options
#
# @param [Hash] opts the options used to define the behavior at update and deref
# and to specify the executor on which to perform actions
# @option opts [Executor] :executor when set use the given `Executor` instance.
# Three special values are also supported: `:io` returns the global pool for
# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast
# operations, and `:immediate` returns the global `ImmediateExecutor` object.
# @!macro deref_options
# @!macro warn.edge
# @api Edge
# @note **Edge Features** are under active development and may change frequently.
#
# - Deprecations are not added before incompatible changes.
# - Edge version: _major_ is always 0, _minor_ bump means incompatible change,
# _patch_ bump means compatible change.
# - Edge features may also lack tests and documentation.
# - Features developed in `concurrent-ruby-edge` are expected to move
# to `concurrent-ruby` when finalised.
# {include:file:README.md}
module Concurrent
end
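A hedged sketch (editor's addition) of the copy/deref options documented above, using Concurrent::Future as one class that accepts them.

require 'concurrent'
future = Concurrent::Future.execute(freeze_on_deref: true) { [1, 2, 3] }
future.value          #=> [1, 2, 3] (blocks until fulfilled)
future.value.frozen?  #=> true, so callers cannot mutate the shared result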

View File

@@ -0,0 +1,587 @@
require 'concurrent/configuration'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/atomic/thread_local_var'
require 'concurrent/collection/copy_on_notify_observer_set'
require 'concurrent/concern/observable'
require 'concurrent/synchronization'
module Concurrent
# `Agent` is inspired by Clojure's [agent](http://clojure.org/agents)
# function. An agent is a shared, mutable variable providing independent,
# uncoordinated, *asynchronous* change of individual values. Best used when
# the value will undergo frequent, complex updates. Suitable when the result
# of an update does not need to be known immediately. `Agent` is (mostly)
# functionally equivalent to Clojure's agent, except where the runtime
# prevents parity.
#
# Agents are reactive, not autonomous - there is no imperative message loop
# and no blocking receive. The state of an Agent should be itself immutable
# and the `#value` of an Agent is always immediately available for reading by
# any thread without any messages, i.e. observation does not require
# cooperation or coordination.
#
# Agent action dispatches are made using the various `#send` methods. These
# methods always return immediately. At some point later, in another thread,
# the following will happen:
#
# 1. The given `action` will be applied to the state of the Agent and the
# `args`, if any were supplied.
# 2. The return value of `action` will be passed to the validator lambda,
# if one has been set on the Agent.
# 3. If the validator succeeds or if no validator was given, the return value
# of the given `action` will become the new `#value` of the Agent. See
# `#initialize` for details.
# 4. If any observers were added to the Agent, they will be notified. See
# `#add_observer` for details.
# 5. If during the `action` execution any other dispatches are made (directly
# or indirectly), they will be held until after the `#value` of the Agent
# has been changed.
#
# If any exceptions are thrown by an action function, no nested dispatches
# will occur, and the exception will be cached in the Agent itself. When an
# Agent has errors cached, any subsequent interactions will immediately throw
# an exception, until the agent's errors are cleared. Agent errors can be
# examined with `#error` and the agent restarted with `#restart`.
#
# The actions of all Agents get interleaved amongst threads in a thread pool.
# At any point in time, at most one action for each Agent is being executed.
# Actions dispatched to an agent from another single agent or thread will
# occur in the order they were sent, potentially interleaved with actions
# dispatched to the same agent from other sources. The `#send` method should
# be used for actions that are CPU limited, while the `#send_off` method is
# appropriate for actions that may block on IO.
#
# Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions.
#
# ## Example
#
# ```
# def next_fibonacci(set = nil)
# return [0, 1] if set.nil?
# set + [set[-2..-1].reduce{|sum,x| sum + x }]
# end
#
# # create an agent with an initial value
# agent = Concurrent::Agent.new(next_fibonacci)
#
# # send a few update requests
# 5.times do
# agent.send{|set| next_fibonacci(set) }
# end
#
# # wait for them to complete
# agent.await
#
# # get the current value
# agent.value #=> [0, 1, 1, 2, 3, 5, 8]
# ```
#
# ## Observation
#
# Agents support observers through the {Concurrent::Observable} mixin module.
# Notification of observers occurs every time an action dispatch returns and
# the new value is successfully validated. Observation will *not* occur if the
# action raises an exception, if validation fails, or when a {#restart} occurs.
#
# When notified the observer will receive three arguments: `time`, `old_value`,
# and `new_value`. The `time` argument is the time at which the value change
# occurred. The `old_value` is the value of the Agent when the action began
# processing. The `new_value` is the value to which the Agent was set when the
# action completed. Note that `old_value` and `new_value` may be the same.
# This is not an error. It simply means that the action returned the same
# value.
#
# ## Nested Actions
#
# It is possible for an Agent action to post further actions back to itself.
# The nested actions will be enqueued normally then processed *after* the
# outer action completes, in the order they were sent, possibly interleaved
# with action dispatches from other threads. Nested actions never deadlock
# with one another and a failure in a nested action will never affect the
# outer action.
#
# Nested actions can be called using the Agent reference from the enclosing
# scope or by passing the reference in as a "send" argument. Nested actions
# cannot be posted using `self` from within the action block/proc/lambda; `self`
# in this context will not reference the Agent. The preferred method for
# dispatching nested actions is to pass the Agent as an argument. This allows
# Ruby to more effectively manage the enclosing scope.
#
# Prefer this:
#
# ```
# agent = Concurrent::Agent.new(0)
# agent.send(agent) do |value, this|
# this.send {|v| v + 42 }
# 3.14
# end
# agent.value #=> 45.14
# ```
#
# Over this:
#
# ```
# agent = Concurrent::Agent.new(0)
# agent.send do |value|
# agent.send {|v| v + 42 }
# 3.14
# end
# ```
#
# @!macro agent_await_warning
#
# **NOTE** Never, *under any circumstances*, call any of the "await" methods
# ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action
# block/proc/lambda. The call will block the Agent and will always fail.
# Calling either {#await} or {#wait} (with a timeout of `nil`) will
# hopelessly deadlock the Agent with no possibility of recovery.
#
# @!macro thread_safe_variable_comparison
#
# @see http://clojure.org/Agents Clojure Agents
# @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State
class Agent < Synchronization::LockableObject
include Concern::Observable
ERROR_MODES = [:continue, :fail].freeze
private_constant :ERROR_MODES
AWAIT_FLAG = ::Object.new
private_constant :AWAIT_FLAG
AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG }
private_constant :AWAIT_ACTION
DEFAULT_ERROR_HANDLER = ->(agent, error) { nil }
private_constant :DEFAULT_ERROR_HANDLER
DEFAULT_VALIDATOR = ->(value) { true }
private_constant :DEFAULT_VALIDATOR
Job = Struct.new(:action, :args, :executor, :caller)
private_constant :Job
# Raised during action processing or any other time in an Agent's lifecycle.
class Error < StandardError
def initialize(message = nil)
message ||= 'agent must be restarted before jobs can post'
super(message)
end
end
# Raised when a new value obtained during action processing or at `#restart`
# fails validation.
class ValidationError < Error
def initialize(message = nil)
message ||= 'invalid value'
super(message)
end
end
# The error mode this Agent is operating in. See {#initialize} for details.
attr_reader :error_mode
# Create a new `Agent` with the given initial value and options.
#
# The `:validator` option must be `nil` or a side-effect free proc/lambda
# which takes one argument. On any intended value change the validator, if
# provided, will be called. If the new value is invalid the validator should
# return `false` or raise an error.
#
# The `:error_handler` option must be `nil` or a proc/lambda which takes two
# arguments. When an action raises an error or validation fails, either by
# returning false or raising an error, the error handler will be called. The
# arguments to the error handler will be a reference to the agent itself and
# the error object which was raised.
#
# The `:error_mode` may be either `:continue` (the default if an error
# handler is given) or `:fail` (the default if no error handler is
# given).
#
# If an action being run by the agent throws an error or doesn't pass
# validation, the error handler, if present, will be called. After the
# handler executes, if the error mode is `:continue`, the Agent will continue
# as if neither the action that caused the error nor the error itself ever
# happened.
#
# If the mode is `:fail` the Agent will become {#failed?} and will stop
# accepting new action dispatches. Any previously queued actions will be
# held until {#restart} is called. The {#value} method will still work,
# returning the value of the Agent before the error.
#
# @param [Object] initial the initial value
# @param [Hash] opts the configuration options
#
# @option opts [Symbol] :error_mode either `:continue` or `:fail`
# @option opts [nil, Proc] :error_handler the (optional) error handler
# @option opts [nil, Proc] :validator the (optional) validation procedure
def initialize(initial, opts = {})
super()
synchronize { ns_initialize(initial, opts) }
end
# The current value (state) of the Agent, irrespective of any pending or
# in-progress actions. The value is always available and is non-blocking.
#
# @return [Object] the current value
def value
@current.value # TODO (pitr 12-Sep-2015): broken unsafe read?
end
alias_method :deref, :value
# When {#failed?} and {#error_mode} is `:fail`, returns the error object
# which caused the failure, else `nil`. When {#error_mode} is `:continue`
# will *always* return `nil`.
#
# @return [nil, Error] the error which caused the failure when {#failed?}
def error
@error.value
end
alias_method :reason, :error
# @!macro agent_send
#
# Dispatches an action to the Agent and returns immediately. Subsequently,
# in a thread from a thread pool, the {#value} will be set to the return
# value of the action. Action dispatches are only allowed when the Agent
# is not {#failed?}.
#
# The action must be a block/proc/lambda which takes 1 or more arguments.
# The first argument is the current {#value} of the Agent. Any arguments
# passed to the send method via the `args` parameter will be passed to the
# action as the remaining arguments. The action must return the new value
# of the Agent.
#
# * {#send} and {#send!} should be used for actions that are CPU limited
# * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that
# may block on IO
# * {#send_via} and {#send_via!} are used when a specific executor is to
# be used for the action
#
# @param [Array<Object>] args zero or more arguments to be passed to
# the action
# @param [Proc] action the action dispatch to be enqueued
#
# @yield [value, *args] process the old value and return the new
# @yieldparam [Object] value the current {#value} of the Agent
# @yieldparam [Array<Object>] args zero or more arguments to pass to the
# action
# @yieldreturn [Object] the new value of the Agent
#
# @!macro send_return
# @return [Boolean] true if the action is successfully enqueued, false if
# the Agent is {#failed?}
def send(*args, &action)
enqueue_action_job(action, args, Concurrent.global_fast_executor)
end
# @!macro agent_send
#
# @!macro send_bang_return_and_raise
# @return [Boolean] true if the action is successfully enqueued
# @raise [Concurrent::Agent::Error] if the Agent is {#failed?}
def send!(*args, &action)
raise Error.new unless send(*args, &action)
true
end
# @!macro agent_send
# @!macro send_return
def send_off(*args, &action)
enqueue_action_job(action, args, Concurrent.global_io_executor)
end
alias_method :post, :send_off
# @!macro agent_send
# @!macro send_bang_return_and_raise
def send_off!(*args, &action)
raise Error.new unless send_off(*args, &action)
true
end
# @!macro agent_send
# @!macro send_return
# @param [Concurrent::ExecutorService] executor the executor on which the
# action is to be dispatched
def send_via(executor, *args, &action)
enqueue_action_job(action, args, executor)
end
# @!macro agent_send
# @!macro send_bang_return_and_raise
# @param [Concurrent::ExecutorService] executor the executor on which the
# action is to be dispatched
def send_via!(executor, *args, &action)
raise Error.new unless send_via(executor, *args, &action)
true
end
# Dispatches an action to the Agent and returns immediately. Subsequently,
# in a thread from a thread pool, the {#value} will be set to the return
# value of the action. Appropriate for actions that may block on IO.
#
# @param [Proc] action the action dispatch to be enqueued
# @return [Concurrent::Agent] self
# @see #send_off
def <<(action)
send_off(&action)
self
end
# Blocks the current thread (indefinitely!) until all actions dispatched
# thus far, from this thread or nested by the Agent, have occurred. Will
# block when {#failed?}. Will never return if a failed Agent is restarted
# via {#restart} with `:clear_actions` true.
#
# Returns a reference to `self` to support method chaining:
#
# ```
# current_value = agent.await.value
# ```
#
# @return [Concurrent::Agent] self
#
# @!macro agent_await_warning
def await
wait(nil)
self
end
# Blocks the current thread until all actions dispatched thus far, from this
# thread or nested by the Agent, have occurred, or the timeout (in seconds)
# has elapsed.
#
# @param [Float] timeout the maximum number of seconds to wait
# @return [Boolean] true if all actions complete before timeout else false
#
# @!macro agent_await_warning
def await_for(timeout)
wait(timeout.to_f)
end
# Blocks the current thread until all actions dispatched thus far, from this
# thread or nested by the Agent, have occurred, or the timeout (in seconds)
# has elapsed.
#
# @param [Float] timeout the maximum number of seconds to wait
# @return [Boolean] true if all actions complete before timeout
#
# @raise [Concurrent::TimeoutError] when the timeout is reached
#
# @!macro agent_await_warning
def await_for!(timeout)
raise Concurrent::TimeoutError unless wait(timeout.to_f)
true
end
# Blocks the current thread until all actions dispatched thus far, from this
# thread or nested by the Agent, have occurred, or the timeout (in seconds)
# has elapsed. Will block indefinitely when timeout is nil or not given.
#
# Provided mainly for consistency with other classes in this library. Prefer
# the various `await` methods instead.
#
# @param [Float] timeout the maximum number of seconds to wait
# @return [Boolean] true if all actions complete before timeout else false
#
# @!macro agent_await_warning
def wait(timeout = nil)
latch = Concurrent::CountDownLatch.new(1)
enqueue_await_job(latch)
latch.wait(timeout)
end
# Is the Agent in a failed state?
#
# @see #restart
def failed?
!@error.value.nil?
end
alias_method :stopped?, :failed?
# When an Agent is {#failed?}, changes the Agent {#value} to `new_value`
# then un-fails the Agent so that action dispatches are allowed again. If
# the `:clear_actions` option is given and true, any actions queued on the
# Agent that were being held while it was failed will be discarded,
# otherwise those held actions will proceed. The `new_value` must pass the
# validator if any, or `restart` will raise an exception and the Agent will
# remain failed with its old {#value} and {#error}. Observers, if any, will
# not be notified of the new state.
#
# @param [Object] new_value the new value for the Agent once restarted
# @param [Hash] opts the configuration options
# @option opts [Symbol] :clear_actions true if all enqueued but unprocessed
# actions should be discarded on restart, else false (default: false)
# @return [Boolean] true
#
# @raise [Concurrent::Agent::Error] when not failed
def restart(new_value, opts = {})
clear_actions = opts.fetch(:clear_actions, false)
synchronize do
raise Error.new('agent is not failed') unless failed?
raise ValidationError unless ns_validate(new_value)
@current.value = new_value
@error.value = nil
@queue.clear if clear_actions
ns_post_next_job unless @queue.empty?
end
true
end
class << self
# Blocks the current thread (indefinitely!) until all actions dispatched
# thus far to all the given Agents, from this thread or nested by the
# given Agents, have occurred. Will block when any of the agents are
# failed. Will never return if a failed Agent is restarted with
# `:clear_actions` true.
#
# @param [Array<Concurrent::Agent>] agents the Agents on which to wait
# @return [Boolean] true
#
# @!macro agent_await_warning
def await(*agents)
agents.each { |agent| agent.await }
true
end
# Blocks the current thread until all actions dispatched thus far to all
# the given Agents, from this thread or nested by the given Agents, have
# occurred, or the timeout (in seconds) has elapsed.
#
# @param [Float] timeout the maximum number of seconds to wait
# @param [Array<Concurrent::Agent>] agents the Agents on which to wait
# @return [Boolean] true if all actions complete before timeout else false
#
# @!macro agent_await_warning
def await_for(timeout, *agents)
end_at = Concurrent.monotonic_time + timeout.to_f
ok = agents.length.times do |i|
break false if (delay = end_at - Concurrent.monotonic_time) < 0
break false unless agents[i].await_for(delay)
end
!!ok
end
# Blocks the current thread until all actions dispatched thus far to all
# the given Agents, from this thread or nested by the given Agents, have
# occurred, or the timeout (in seconds) has elapsed.
#
# @param [Float] timeout the maximum number of seconds to wait
# @param [Array<Concurrent::Agent>] agents the Agents on which to wait
# @return [Boolean] true if all actions complete before timeout
#
# @raise [Concurrent::TimeoutError] when the timeout is reached
# @!macro agent_await_warning
def await_for!(timeout, *agents)
raise Concurrent::TimeoutError unless await_for(timeout, *agents)
true
end
end
private
def ns_initialize(initial, opts)
@error_mode = opts[:error_mode]
@error_handler = opts[:error_handler]
if @error_mode && !ERROR_MODES.include?(@error_mode)
raise ArgumentError.new('unrecognized error mode')
elsif @error_mode.nil?
@error_mode = @error_handler ? :continue : :fail
end
@error_handler ||= DEFAULT_ERROR_HANDLER
@validator = opts.fetch(:validator, DEFAULT_VALIDATOR)
@current = Concurrent::AtomicReference.new(initial)
@error = Concurrent::AtomicReference.new(nil)
@caller = Concurrent::ThreadLocalVar.new(nil)
@queue = []
self.observers = Collection::CopyOnNotifyObserverSet.new
end
def enqueue_action_job(action, args, executor)
raise ArgumentError.new('no action given') unless action
job = Job.new(action, args, executor, @caller.value || Thread.current.object_id)
synchronize { ns_enqueue_job(job) }
end
def enqueue_await_job(latch)
synchronize do
if (index = ns_find_last_job_for_thread)
job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor,
Thread.current.object_id)
ns_enqueue_job(job, index+1)
else
latch.count_down
true
end
end
end
def ns_enqueue_job(job, index = nil)
# a non-nil index means this is an await job
return false if index.nil? && failed?
index ||= @queue.length
@queue.insert(index, job)
# if this is the only job, post to executor
ns_post_next_job if @queue.length == 1
true
end
def ns_post_next_job
@queue.first.executor.post { execute_next_job }
end
def execute_next_job
job = synchronize { @queue.first }
old_value = @current.value
@caller.value = job.caller # for nested actions
new_value = job.action.call(old_value, *job.args)
@caller.value = nil
return if new_value == AWAIT_FLAG
if ns_validate(new_value)
@current.value = new_value
observers.notify_observers(Time.now, old_value, new_value)
else
handle_error(ValidationError.new)
end
rescue => error
handle_error(error)
ensure
synchronize do
@queue.shift
unless failed? || @queue.empty?
ns_post_next_job
end
end
end
def ns_validate(value)
@validator.call(value)
rescue
false
end
def handle_error(error)
# stop new jobs from posting
@error.value = error if @error_mode == :fail
@error_handler.call(self, error)
rescue
# do nothing
end
def ns_find_last_job_for_thread
@queue.rindex { |job| job.caller == Thread.current.object_id }
end
end
end
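A hedged sketch (editor's addition) of the :fail error mode and #restart behavior described above; timing comments are approximate.

require 'concurrent'
agent = Concurrent::Agent.new(0, error_mode: :fail)
agent.send { |value| raise 'boom' }
agent.await_for(1)               # give the action time to run; returns false once failed
agent.failed?                    #=> true after the action has raised
agent.error                      #=> #<RuntimeError: boom>
agent.restart(0, clear_actions: true)
agent.send { |value| value + 1 } # dispatches are accepted again
agent.await_for(1) && agent.value #=> 1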

View File

@@ -0,0 +1,66 @@
require 'concurrent/utility/engine'
require 'concurrent/thread_safe/util'
module Concurrent
# @!macro concurrent_array
#
# A thread-safe subclass of Array. This version locks against the object
# itself for every method call, ensuring only one thread can be reading
# or writing at a time. This includes iteration methods like `#each`.
#
# @note `a += b` is **not** a **thread-safe** operation on
# `Concurrent::Array`. It reads array `a`, then it creates a new `Concurrent::Array`
# which is the concatenation of `a` and `b`, then it writes the concatenation to `a`.
# The read and write are independent operations; they do not form a single atomic
# operation, so when two `+=` operations are executed concurrently updates
# may be lost. Use `#concat` instead (see the sketch at the end of this file).
#
# @see http://ruby-doc.org/core-2.2.0/Array.html Ruby standard library `Array`
# @!macro internal_implementation_note
ArrayImplementation = case
when Concurrent.on_cruby?
# Because MRI never runs code in parallel, the existing
# non-thread-safe structures should usually work fine.
::Array
when Concurrent.on_jruby?
require 'jruby/synchronized'
class JRubyArray < ::Array
include JRuby::Synchronized
end
JRubyArray
when Concurrent.on_rbx?
require 'monitor'
require 'concurrent/thread_safe/util/data_structures'
class RbxArray < ::Array
end
ThreadSafe::Util.make_synchronized_on_rbx RbxArray
RbxArray
when Concurrent.on_truffleruby?
require 'concurrent/thread_safe/util/data_structures'
class TruffleRubyArray < ::Array
end
ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray
TruffleRubyArray
else
warn 'Possibly unsupported Ruby implementation'
::Array
end
private_constant :ArrayImplementation
# @!macro concurrent_array
class Array < ArrayImplementation
end
end
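A hedged sketch (editor's addition) of the += pitfall from the note above: each #concat call is a single synchronized method call, so no updates are lost.

require 'concurrent'
shared = Concurrent::Array.new
threads = 2.times.map do
  Thread.new { 1000.times { shared.concat([1]) } }
end
threads.each(&:join)
shared.length #=> 2000 (with `shared += [1]` concurrent updates could be lost)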

View File

@@ -0,0 +1,445 @@
require 'concurrent/configuration'
require 'concurrent/ivar'
require 'concurrent/synchronization/lockable_object'
module Concurrent
# A mixin module that provides simple asynchronous behavior to a class,
# turning it into a simple actor. Loosely based on Erlang's
# [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without
# supervision or linking.
#
# A more feature-rich {Concurrent::Actor} is also available when the
# capabilities of `Async` are too limited.
#
# ```cucumber
# Feature:
# As a stateful, plain old Ruby class
# I want safe, asynchronous behavior
# So my long-running methods don't block the main thread
# ```
#
# The `Async` module is a way to mix simple yet powerful asynchronous
# capabilities into any plain old Ruby object or class, turning each object
# into a simple Actor. Method calls are processed on a background thread. The
# caller is free to perform other actions while processing occurs in the
# background.
#
# Method calls to the asynchronous object are made via two proxy methods:
# `async` (alias `cast`) and `await` (alias `call`). These proxy methods post
# the method call to the object's background thread and return a "future"
# which will eventually contain the result of the method call.
#
# This behavior is loosely patterned after Erlang's `gen_server` behavior.
# When an Erlang module implements the `gen_server` behavior it becomes
# inherently asynchronous. The `start` or `start_link` function spawns a
# process (similar to a thread but much more lightweight and efficient) and
# returns the ID of the process. Using the process ID, other processes can
# send messages to the `gen_server` via the `cast` and `call` methods. Unlike
# Erlang's `gen_server`, however, `Async` classes do not support linking or
# supervision trees.
#
# ## Basic Usage
#
# When this module is mixed into a class, objects of the class become inherently
# asynchronous. Each object gets its own background thread on which to post
# asynchronous method calls. Asynchronous method calls are executed in the
# background one at a time in the order they are received.
#
# To create an asynchronous class, simply mix in the `Concurrent::Async` module:
#
# ```
# class Hello
# include Concurrent::Async
#
# def hello(name)
# "Hello, #{name}!"
# end
# end
# ```
#
# When defining a constructor it is critical that the first line be a call to
# `super` with no arguments. The `super` method initializes the background
# thread and other asynchronous components.
#
# ```
# class BackgroundLogger
# include Concurrent::Async
#
# def initialize(level)
# super()
# @logger = Logger.new(STDOUT)
# @logger.level = level
# end
#
# def info(msg)
# @logger.info(msg)
# end
# end
# ```
#
# Mixing this module into a class provides each object two proxy methods:
# `async` and `await`. These methods are thread safe with respect to the
# enclosing object. The former proxy allows methods to be called
# asynchronously by posting to the object's internal thread. The latter proxy
# allows a method to be called synchronously but does so safely with respect
# to any pending asynchronous method calls and ensures proper ordering. Both
# methods return a {Concurrent::IVar} which can be inspected for the result
# of the proxied method call. Calling a method with `async` will return a
# `:pending` `IVar` whereas `await` will return a `:complete` `IVar`.
#
# ```
# class Echo
# include Concurrent::Async
#
# def echo(msg)
# print "#{msg}\n"
# end
# end
#
# horn = Echo.new
# horn.echo('zero') # synchronous, not thread-safe
# # returns the actual return value of the method
#
# horn.async.echo('one') # asynchronous, non-blocking, thread-safe
# # returns an IVar in the :pending state
#
# horn.await.echo('two') # synchronous, blocking, thread-safe
# # returns an IVar in the :complete state
# ```
#
# ## Let It Fail
#
# The `async` and `await` proxy methods have built-in error protection based
# on Erlang's famous "let it fail" philosophy. Instance methods should not be
# programmed defensively. When an exception is raised by a delegated method
# the proxy will rescue the exception, expose it to the caller as the `reason`
# attribute of the returned future, then process the next method call.
#
# ## Calling Methods Internally
#
# External method calls should *always* use the `async` and `await` proxy
# methods. When one method calls another method, the `async` proxy should
# rarely be used and the `await` proxy should *never* be used.
#
# When an object calls one of its own methods using the `await` proxy the
# second call will be enqueued *behind* the currently running method call.
# Any attempt to wait on the result will fail as the second call will never
# run until after the current call completes.
#
# Calling a method using the `await` proxy from within a method that was
# itself called using `async` or `await` will irreversibly deadlock the
# object. Do *not* do this, ever.
#
# ## Instance Variables and Attribute Accessors
#
# Instance variables do not need to be thread-safe so long as they are private.
# Asynchronous method calls are processed in the order they are received and
# are processed one at a time. Therefore private instance variables can only
# be accessed by one thread at a time. This is inherently thread-safe.
#
# When using private instance variables within asynchronous methods, the best
# practice is to read the instance variable into a local variable at the start
# of the method then update the instance variable at the *end* of the method.
# This way, should an exception be raised during method execution the internal
# state of the object will not have been changed.
#
# ### Reader Attributes
#
# The use of `attr_reader` is discouraged. Internal state exposed externally,
# when necessary, should be done through accessor methods. The instance
# variables exposed by these methods *must* be thread-safe, or they must be
# called using the `async` and `await` proxy methods. These two approaches are
# subtly different.
#
# When internal state is accessed via the `async` and `await` proxy methods,
# the returned value represents the object's state *at the time the call is
# processed*, which may *not* be the state of the object at the time the call
# is made.
#
# To get the state *at the current* time, irrespective of any enqueued method
# calls, a reader method must be called directly. This is inherently unsafe
# unless the instance variable is itself thread-safe, preferably using one
# of the thread-safe classes within this library. Because the thread-safe
# classes within this library are internally-locking or non-locking, they can
# be safely used from within asynchronous methods without causing deadlocks.
#
# Generally speaking, the best practice is to *not* expose internal state via
# reader methods. The best practice is to simply use the method's return value.
#
# ### Writer Attributes
#
# Writer attributes should never be used with asynchronous classes. Changing
# the state externally, even when done in the thread-safe way, is not logically
# consistent. Changes to state need to be timed with respect to all asynchronous
# method calls which may be in-process or enqueued. The only safe practice is to
# pass all necessary data to each method as arguments and let the method update
# the internal state as necessary.
#
# ## Class Constants, Variables, and Methods
#
# ### Class Constants
#
# Class constants do not need to be thread-safe. Since they are read-only and
# immutable they may be safely read both externally and from within
# asynchronous methods.
#
# ### Class Variables
#
# Class variables should be avoided. Class variables represent shared state.
# Shared state is anathema to concurrency. Should there be a need to share
# state using class variables they *must* be thread-safe, preferably
# using the thread-safe classes within this library. When updating class
# variables, never assign a new value/object to the variable itself. Assignment
# is not thread-safe in Ruby. Instead, use the thread-safe update functions
# of the variable itself to change the value.
#
# The best practice is to *never* use class variables with `Async` classes.
#
# ### Class Methods
#
# Class methods which are pure functions are safe. Class methods which modify
# class variables should be avoided, for all the reasons listed above.
#
# ## An Important Note About Thread Safe Guarantees
#
# > Thread safe guarantees can only be made when asynchronous method calls
# > are not mixed with direct method calls. Use only direct method calls
# > when the object is used exclusively on a single thread. Use only
# > `async` and `await` when the object is shared between threads. Once you
# > call a method using `async` or `await`, you should no longer call methods
# > directly on the object. Use `async` and `await` exclusively from then on.
#
# @example
#
# class Echo
# include Concurrent::Async
#
# def echo(msg)
# print "#{msg}\n"
# end
# end
#
# horn = Echo.new
# horn.echo('zero') # synchronous, not thread-safe
# # returns the actual return value of the method
#
# horn.async.echo('one') # asynchronous, non-blocking, thread-safe
# # returns an IVar in the :pending state
#
# horn.await.echo('two') # synchronous, blocking, thread-safe
# # returns an IVar in the :complete state
#
# @see Concurrent::Actor
# @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia
# @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server
# @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/
module Async
# @!method self.new(*args, &block)
#
# Instantiate a new object and ensure proper initialization of the
# synchronization mechanisms.
#
# @param [Array<Object>] args Zero or more arguments to be passed to the
# object's initializer.
# @param [Proc] block Optional block to pass to the object's initializer.
# @return [Object] A properly initialized object of the asynchronous class.
# Check for the presence of a method on an object and determine if a given
# set of arguments matches the required arity.
#
# @param [Object] obj the object to check against
# @param [Symbol] method the method to check the object for
# @param [Array] args zero or more arguments for the arity check
#
# @raise [NameError] the object does not respond to `method` method
# @raise [ArgumentError] the given `args` do not match the arity of `method`
#
# @note This check is imperfect because of the way Ruby reports the arity of
# methods with a variable number of arguments. It is possible to determine
# if too few arguments are given but impossible to determine if too many
# arguments are given. This check may also fail to recognize dynamic behavior
# of the object, such as methods simulated with `method_missing`.
#
# @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity
# @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to?
# @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing
#
# @!visibility private
def self.validate_argc(obj, method, *args)
argc = args.length
arity = obj.method(method).arity
if arity >= 0 && argc != arity
raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})")
elsif arity < 0 && (arity = (arity + 1).abs) > argc
raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)")
end
end
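# Editor's note, a hedged illustration of the arity check above
# (String#insert has arity 2):
#
# Concurrent::Async.validate_argc('abc', :insert, 0)
# #=> raises ArgumentError: wrong number of arguments (1 for 2)
# Concurrent::Async.validate_argc('abc', :insert, 0, 'x') # passes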
# @!visibility private
def self.included(base)
base.singleton_class.send(:alias_method, :original_new, :new)
base.extend(ClassMethods)
super(base)
end
# @!visibility private
module ClassMethods
def new(*args, &block)
obj = original_new(*args, &block)
obj.send(:init_synchronization)
obj
end
end
private_constant :ClassMethods
# Delegates asynchronous, thread-safe method calls to the wrapped object.
#
# @!visibility private
class AsyncDelegator < Synchronization::LockableObject
safe_initialization!
# Create a new delegator object wrapping the given delegate.
#
# @param [Object] delegate the object to wrap and delegate method calls to
def initialize(delegate)
super()
@delegate = delegate
@queue = []
@executor = Concurrent.global_io_executor
end
# Delegates method calls to the wrapped object.
#
# @param [Symbol] method the method being called
# @param [Array] args zero or more arguments to the method
#
# @return [IVar] the result of the method call
#
# @raise [NameError] the object does not respond to `method` method
# @raise [ArgumentError] the given `args` do not match the arity of `method`
def method_missing(method, *args, &block)
super unless @delegate.respond_to?(method)
Async::validate_argc(@delegate, method, *args)
ivar = Concurrent::IVar.new
synchronize do
@queue.push [ivar, method, args, block]
@executor.post { perform } if @queue.length == 1
end
ivar
end
# Perform all enqueued tasks.
#
# This method must be called from within the executor. It must not be
# called while already running. It will loop until the queue is empty.
def perform
loop do
ivar, method, args, block = synchronize { @queue.first }
break unless ivar # queue is empty
begin
ivar.set(@delegate.send(method, *args, &block))
rescue => error
ivar.fail(error)
end
synchronize do
@queue.shift
return if @queue.empty?
end
end
end
end
private_constant :AsyncDelegator
# Delegates synchronous, thread-safe method calls to the wrapped object.
#
# @!visibility private
class AwaitDelegator
# Create a new delegator object wrapping the given delegate.
#
# @param [AsyncDelegator] delegate the object to wrap and delegate method calls to
def initialize(delegate)
@delegate = delegate
end
# Delegates method calls to the wrapped object.
#
# @param [Symbol] method the method being called
# @param [Array] args zero or more arguments to the method
#
# @return [IVar] the result of the method call
#
# @raise [NameError] the object does not respond to `method` method
# @raise [ArgumentError] the given `args` do not match the arity of `method`
def method_missing(method, *args, &block)
ivar = @delegate.send(method, *args, &block)
ivar.wait
ivar
end
end
private_constant :AwaitDelegator
# Causes the chained method call to be performed asynchronously on the
# object's thread. The delegated method will return a future in the
# `:pending` state and the method call will have been scheduled on the
# object's thread. The final disposition of the method call can be obtained
# by inspecting the returned future.
#
# @!macro async_thread_safety_warning
# @note The method call is guaranteed to be thread safe with respect to
# all other method calls against the same object that are called with
# either `async` or `await`. The mutable nature of Ruby references
# (and object orientation in general) prevent any other thread safety
# guarantees. Do NOT mix direct method calls with delegated method calls.
# Use *only* delegated method calls when sharing the object between threads.
#
# @return [Concurrent::IVar] the pending result of the asynchronous operation
#
# @raise [NameError] the object does not respond to the requested method
# @raise [ArgumentError] the given `args` do not match the arity of
# the requested method
def async
@__async_delegator__
end
alias_method :cast, :async
# Causes the chained method call to be performed synchronously on the
# current thread. The delegated method will return a future in either the
# `:fulfilled` or `:rejected` state and the delegated method will have
# completed. The final disposition of the delegated method can be obtained
# by inspecting the returned future.
#
# @!macro async_thread_safety_warning
#
# @return [Concurrent::IVar] the completed result of the synchronous operation
#
# @raise [NameError] the object does not respond to the requested method
# @raise [ArgumentError] the given `args` do not match the arity of the
# requested method
def await
@__await_delegator__
end
alias_method :call, :await
# Initialize the internal serializer and other synchronization mechanisms.
#
# @note This method *must* be called immediately upon object construction.
# This is the only way thread-safe initialization can be guaranteed.
#
# @!visibility private
def init_synchronization
return self if defined?(@__async_initialized__) && @__async_initialized__
@__async_initialized__ = true
@__async_delegator__ = AsyncDelegator.new(self)
@__await_delegator__ = AwaitDelegator.new(@__async_delegator__)
self
end
end
end
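A hedged sketch (editor's addition) of the async/await proxies and the read-then-update instance-variable pattern recommended above; the class and values are illustrative.

require 'concurrent'

class AsyncCounter
  include Concurrent::Async

  def initialize
    super() # required: initializes the async machinery before first use
    @count = 0
  end

  def add(n)
    new_count = @count + n # read state into a local first...
    @count = new_count     # ...and write it back at the end of the method
  end
end

counter = AsyncCounter.new
counter.async.add(1)       # returns a pending IVar immediately
counter.await.add(2).value #=> 3 (blocks until both calls have been processed)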

View File

@@ -0,0 +1,222 @@
require 'concurrent/atomic/atomic_reference'
require 'concurrent/collection/copy_on_notify_observer_set'
require 'concurrent/concern/observable'
require 'concurrent/synchronization'
# @!macro thread_safe_variable_comparison
#
# ## Thread-safe Variable Classes
#
# Each of the thread-safe variable classes is designed to solve a different
# problem. In general:
#
# * *{Concurrent::Agent}:* Shared, mutable variable providing independent,
# uncoordinated, *asynchronous* change of individual values. Best used when
# the value will undergo frequent, complex updates. Suitable when the result
# of an update does not need to be known immediately.
# * *{Concurrent::Atom}:* Shared, mutable variable providing independent,
# uncoordinated, *synchronous* change of individual values. Best used when
# the value will undergo frequent reads but only occasional, though complex,
# updates. Suitable when the result of an update must be known immediately.
# * *{Concurrent::AtomicReference}:* A simple object reference that can be
# updated atomically. Updates are synchronous but fast. Best used when updates
# are simple set operations. Not suitable when updates are complex.
# {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar
# but optimized for the given data type.
# * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used
# when two or more threads need to exchange data. The threads will pair then
# block on each other until the exchange is complete.
# * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread
# must give a value to another, which must take the value. The threads will
# block on each other until the exchange is complete.
# * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which
# holds a different value for each thread which has access. Often used as
# an instance variable in objects which must maintain different state
# for different threads.
# * *{Concurrent::TVar}:* Shared, mutable variables which provide
# *coordinated*, *synchronous* change of *many* states. Used when multiple
# values must change together, in an all-or-nothing transaction.
module Concurrent
# Atoms provide a way to manage shared, synchronous, independent state.
#
# An atom is initialized with an initial value and an optional validation
# proc. At any time the value of the atom can be synchronously and safely
# changed. If a validator is given at construction then any new value
# will be checked against the validator and will be rejected if the
# validator returns false or raises an exception.
#
# There are two ways to change the value of an atom: {#compare_and_set} and
# {#swap}. The former will set the new value if and only if it validates and
# the current value matches the expected old value. The latter will atomically set the
# new value to the result of running the given block if and only if that
# value validates.
#
# ## Example
#
# ```
# def next_fibonacci(set = nil)
# return [0, 1] if set.nil?
# set + [set[-2..-1].reduce{|sum,x| sum + x }]
# end
#
# # create an atom with an initial value
# atom = Concurrent::Atom.new(next_fibonacci)
#
# # send a few update requests
# 5.times do
# atom.swap{|set| next_fibonacci(set) }
# end
#
# # get the current value
# atom.value #=> [0, 1, 1, 2, 3, 5, 8]
# ```
#
# ## Observation
#
# Atoms support observers through the {Concurrent::Observable} mixin module.
# Notification of observers occurs every time the value of the Atom changes.
# When notified the observer will receive three arguments: `time`, `old_value`,
# and `new_value`. The `time` argument is the time at which the value change
# occurred. The `old_value` is the value of the Atom when the change began.
# The `new_value` is the value to which the Atom was set when the change
# completed. Note that `old_value` and `new_value` may be the same. This is
# not an error. It simply means that the change operation returned the same
# value.
#
# Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions.
#
# @!macro thread_safe_variable_comparison
#
# @see http://clojure.org/atoms Clojure Atoms
# @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State
class Atom < Synchronization::Object
include Concern::Observable
safe_initialization!
attr_atomic(:value)
private :value=, :swap_value, :compare_and_set_value, :update_value
public :value
alias_method :deref, :value
# @!method value
# The current value of the atom.
#
# @return [Object] The current value.
# Create a new atom with the given initial value.
#
# @param [Object] value The initial value
# @param [Hash] opts The options used to configure the atom
# @option opts [Proc] :validator (nil) Optional proc used to validate new
# values. It must accept one and only one argument which will be the
# intended new value. The validator should return true if the new value
# is acceptable, else return false (preferably) or raise an exception.
#
# @!macro deref_options
#
# @raise [ArgumentError] if the validator is not a `Proc` (when given)
def initialize(value, opts = {})
super()
@Validator = opts.fetch(:validator, -> v { true })
self.observers = Collection::CopyOnNotifyObserverSet.new
self.value = value
end
# Atomically swaps the value of the atom using the given block. The current
# value will be passed to the block, as will any arguments passed to this
# method. The new value will be validated against the
# (optional) validator proc given at construction. If validation fails the
# value will not be changed.
#
# Internally, {#swap} reads the current value, applies the block to it, and
# attempts to compare-and-set it in. Since another thread may have changed
# the value in the intervening time, it may have to retry, and does so in a
# spin loop. The net effect is that the value will always be the result of
# the application of the supplied block to a current value, atomically.
# However, because the block might be called multiple times, it must be free
# of side effects.
#
# @note The given block may be called multiple times, and thus should be free
# of side effects.
#
# @param [Object] args Zero or more arguments passed to the block.
#
# @yield [value, args] Calculates a new value for the atom based on the
# current value and any supplied arguments.
# @yieldparam value [Object] The current value of the atom.
# @yieldparam args [Object] All arguments passed to the function, in order.
# @yieldreturn [Object] The intended new value of the atom.
#
# @return [Object] The final value of the atom after all operations and
# validations are complete.
#
# @raise [ArgumentError] When no block is given.
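# @example A hedged sketch of passing extra arguments to the block
#   atom = Concurrent::Atom.new(10)
#   atom.swap(5) { |current, delta| current + delta } #=> 15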
def swap(*args)
raise ArgumentError.new('no block given') unless block_given?
loop do
old_value = value
new_value = yield(old_value, *args)
begin
break old_value unless valid?(new_value)
break new_value if compare_and_set(old_value, new_value)
rescue
break old_value
end
end
end
# Atomically sets the value of atom to the new value if and only if the
# current value of the atom is identical to the old value and the new
# value successfully validates against the (optional) validator given
# at construction.
#
# @param [Object] old_value The expected current value.
# @param [Object] new_value The intended new value.
#
# @return [Boolean] True if the value is changed else false.
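# @example A minimal sketch
#   atom = Concurrent::Atom.new(0)
#   atom.compare_and_set(0, 1) #=> true, the value is now 1
#   atom.compare_and_set(0, 2) #=> false, the current value is 1, not 0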
def compare_and_set(old_value, new_value)
if valid?(new_value) && compare_and_set_value(old_value, new_value)
observers.notify_observers(Time.now, old_value, new_value)
true
else
false
end
end
# Atomically sets the value of atom to the new value without regard for the
# current value so long as the new value successfully validates against the
# (optional) validator given at construction.
#
# @param [Object] new_value The intended new value.
#
# @return [Object] The final value of the atom after all operations and
# validations are complete.
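# @example A sketch showing validator rejection
#   atom = Concurrent::Atom.new(1, validator: -> v { v.is_a?(Integer) })
#   atom.reset(42)     #=> 42
#   atom.reset('bad')  #=> 42 (rejected by the validator; value unchanged)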
def reset(new_value)
old_value = value
if valid?(new_value)
self.value = new_value
observers.notify_observers(Time.now, old_value, new_value)
new_value
else
old_value
end
end
private
# Is the new value valid?
#
# @param [Object] new_value The intended new value.
# @return [Boolean] false if the validator function returns false or raises
# an exception else true
def valid?(new_value)
@Validator.call(new_value)
rescue
false
end
end
end

View File

@ -0,0 +1,66 @@
require 'concurrent/constants'
module Concurrent
# @!macro thread_local_var
# @!macro internal_implementation_note
# @!visibility private
class AbstractThreadLocalVar
# @!macro thread_local_var_method_initialize
def initialize(default = nil, &default_block)
if default && block_given?
raise ArgumentError, "Cannot use both value and block as default value"
end
if block_given?
@default_block = default_block
@default = nil
else
@default_block = nil
@default = default
end
allocate_storage
end
# @!macro thread_local_var_method_get
def value
raise NotImplementedError
end
# @!macro thread_local_var_method_set
def value=(value)
raise NotImplementedError
end
# @!macro thread_local_var_method_bind
def bind(value, &block)
if block_given?
old_value = self.value
begin
self.value = value
yield
ensure
self.value = old_value
end
end
end
protected
# @!visibility private
def allocate_storage
raise NotImplementedError
end
# @!visibility private
def default
if @default_block
self.value = @default_block.call
else
@default
end
end
end
end

View File

@ -0,0 +1,126 @@
require 'concurrent/atomic/mutex_atomic_boolean'
require 'concurrent/synchronization'
module Concurrent
###################################################################
# @!macro atomic_boolean_method_initialize
#
# Creates a new `AtomicBoolean` with the given initial value.
#
# @param [Boolean] initial the initial value
# @!macro atomic_boolean_method_value_get
#
# Retrieves the current `Boolean` value.
#
# @return [Boolean] the current value
# @!macro atomic_boolean_method_value_set
#
# Explicitly sets the value.
#
# @param [Boolean] value the new value to be set
#
# @return [Boolean] the current value
# @!macro atomic_boolean_method_true_question
#
# Is the current value `true`?
#
# @return [Boolean] true if the current value is `true`, else false
# @!macro atomic_boolean_method_false_question
#
# Is the current value `false`?
#
# @return [Boolean] true if the current value is `false`, else false
# @!macro atomic_boolean_method_make_true
#
# Explicitly sets the value to true.
#
# @return [Boolean] true if the value has changed, otherwise false
# @!macro atomic_boolean_method_make_false
#
# Explicitly sets the value to false.
#
# @return [Boolean] true if the value has changed, otherwise false
###################################################################
# @!macro atomic_boolean_public_api
#
# @!method initialize(initial = false)
# @!macro atomic_boolean_method_initialize
#
# @!method value
# @!macro atomic_boolean_method_value_get
#
# @!method value=(value)
# @!macro atomic_boolean_method_value_set
#
# @!method true?
# @!macro atomic_boolean_method_true_question
#
# @!method false?
# @!macro atomic_boolean_method_false_question
#
# @!method make_true
# @!macro atomic_boolean_method_make_true
#
# @!method make_false
# @!macro atomic_boolean_method_make_false
###################################################################
# @!visibility private
# @!macro internal_implementation_note
AtomicBooleanImplementation = case
when defined?(JavaAtomicBoolean)
JavaAtomicBoolean
when defined?(CAtomicBoolean)
CAtomicBoolean
else
MutexAtomicBoolean
end
private_constant :AtomicBooleanImplementation
# @!macro atomic_boolean
#
# A boolean value that can be updated atomically. Reads and writes to an atomic
# boolean are thread-safe and guaranteed to succeed. Reads and writes may block
# briefly but no explicit locking is required.
#
# @!macro thread_safe_variable_comparison
#
# Performance:
#
# ```
# Testing with ruby 2.1.2
# Testing with Concurrent::MutexAtomicBoolean...
# 2.790000 0.000000 2.790000 ( 2.791454)
# Testing with Concurrent::CAtomicBoolean...
# 0.740000 0.000000 0.740000 ( 0.740206)
#
# Testing with jruby 1.9.3
# Testing with Concurrent::MutexAtomicBoolean...
# 5.240000 2.520000 7.760000 ( 3.683000)
# Testing with Concurrent::JavaAtomicBoolean...
# 3.340000 0.010000 3.350000 ( 0.855000)
# ```
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean
#
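# @example A usage sketch
#
#   flag = Concurrent::AtomicBoolean.new(false)
#   flag.make_true  #=> true (the value changed)
#   flag.make_true  #=> false (it was already true)
#   flag.true?      #=> true
#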
# @!macro atomic_boolean_public_api
class AtomicBoolean < AtomicBooleanImplementation
# @return [String] Short string representation.
def to_s
format '%s value:%s>', super[0..-2], value
end
alias_method :inspect, :to_s
end
end

View File

@ -0,0 +1,143 @@
require 'concurrent/atomic/mutex_atomic_fixnum'
require 'concurrent/synchronization'
module Concurrent
###################################################################
# @!macro atomic_fixnum_method_initialize
#
# Creates a new `AtomicFixnum` with the given initial value.
#
# @param [Fixnum] initial the initial value
# @raise [ArgumentError] if the initial value is not a `Fixnum`
# @!macro atomic_fixnum_method_value_get
#
# Retrieves the current `Fixnum` value.
#
# @return [Fixnum] the current value
# @!macro atomic_fixnum_method_value_set
#
# Explicitly sets the value.
#
# @param [Fixnum] value the new value to be set
#
# @return [Fixnum] the current value
#
# @raise [ArgumentError] if the new value is not a `Fixnum`
# @!macro atomic_fixnum_method_increment
#
# Increases the current value by the given amount (defaults to 1).
#
# @param [Fixnum] delta the amount by which to increase the current value
#
# @return [Fixnum] the current value after incrementation
# @!macro atomic_fixnum_method_decrement
#
# Decreases the current value by the given amount (defaults to 1).
#
# @param [Fixnum] delta the amount by which to decrease the current value
#
# @return [Fixnum] the current value after decrementation
# @!macro atomic_fixnum_method_compare_and_set
#
# Atomically sets the value to the given updated value if the current
# value == the expected value.
#
# @param [Fixnum] expect the expected value
# @param [Fixnum] update the new value
#
# @return [Boolean] true if the value was updated else false
# @!macro atomic_fixnum_method_update
#
# Pass the current value to the given block, replacing it
# with the block's result. May retry if the value changes
# during the block's execution.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
#
# @return [Object] the new value
###################################################################
# @!macro atomic_fixnum_public_api
#
# @!method initialize(initial = 0)
# @!macro atomic_fixnum_method_initialize
#
# @!method value
# @!macro atomic_fixnum_method_value_get
#
# @!method value=(value)
# @!macro atomic_fixnum_method_value_set
#
# @!method increment(delta)
# @!macro atomic_fixnum_method_increment
#
# @!method decrement(delta)
# @!macro atomic_fixnum_method_decrement
#
# @!method compare_and_set(expect, update)
# @!macro atomic_fixnum_method_compare_and_set
#
# @!method update
# @!macro atomic_fixnum_method_update
###################################################################
# @!visibility private
# @!macro internal_implementation_note
AtomicFixnumImplementation = case
when defined?(JavaAtomicFixnum)
JavaAtomicFixnum
when defined?(CAtomicFixnum)
CAtomicFixnum
else
MutexAtomicFixnum
end
private_constant :AtomicFixnumImplementation
# @!macro atomic_fixnum
#
# A numeric value that can be updated atomically. Reads and writes to an atomic
# fixnum are thread-safe and guaranteed to succeed. Reads and writes may block
# briefly but no explicit locking is required.
#
# @!macro thread_safe_variable_comparison
#
# Performance:
#
# ```
# Testing with ruby 2.1.2
# Testing with Concurrent::MutexAtomicFixnum...
# 3.130000 0.000000 3.130000 ( 3.136505)
# Testing with Concurrent::CAtomicFixnum...
# 0.790000 0.000000 0.790000 ( 0.785550)
#
# Testing with jruby 1.9.3
# Testing with Concurrent::MutexAtomicFixnum...
# 5.460000 2.460000 7.920000 ( 3.715000)
# Testing with Concurrent::JavaAtomicFixnum...
# 4.520000 0.030000 4.550000 ( 1.187000)
# ```
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong
#
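# @example A usage sketch
#
#   counter = Concurrent::AtomicFixnum.new(0)
#   counter.increment              #=> 1
#   counter.increment(5)           #=> 6
#   counter.compare_and_set(6, 10) #=> true
#   counter.update { |v| v * 2 }   #=> 20
#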
# @!macro atomic_fixnum_public_api
class AtomicFixnum < AtomicFixnumImplementation
# @return [String] Short string representation.
def to_s
format '%s value:%s>', super[0..-2], value
end
alias_method :inspect, :to_s
end
end

View File

@ -0,0 +1,164 @@
module Concurrent
# An atomic reference which maintains an object reference along with a mark bit
# that can be updated atomically.
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html
# java.util.concurrent.atomic.AtomicMarkableReference
class AtomicMarkableReference < ::Concurrent::Synchronization::Object
attr_atomic(:reference)
private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference
def initialize(value = nil, mark = false)
super()
self.reference = immutable_array(value, mark)
end
# Atomically sets the value and mark to the given updated value and
# mark given both:
# - the current value == the expected value &&
# - the current mark == the expected mark
#
# @param [Object] expected_val the expected value
# @param [Object] new_val the new value
# @param [Boolean] expected_mark the expected mark
# @param [Boolean] new_mark the new mark
#
# @return [Boolean] `true` if successful. A `false` return indicates
# that the actual value was not equal to the expected value or the
# actual mark was not equal to the expected mark
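# @example A hedged sketch (symbols compare by identity, so this succeeds)
#   amr = Concurrent::AtomicMarkableReference.new(:pending, false)
#   amr.compare_and_set(:pending, :done, false, true) #=> true
#   amr.get #=> [:done, true]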
def compare_and_set(expected_val, new_val, expected_mark, new_mark)
# Memoize a valid reference to the current AtomicReference for
# later comparison.
current = reference
curr_val, curr_mark = current
# Ensure that the expected marks match.
return false unless expected_mark == curr_mark
if expected_val.is_a? Numeric
# If the object is a numeric, we need to ensure we are comparing
# the numerical values
return false unless expected_val == curr_val
else
# Otherwise, we need to ensure we are comparing the object identity.
# Theoretically, this could be incorrect if a user monkey-patched
# `Object#equal?`, but they should know that they are playing with
# fire at that point.
return false unless expected_val.equal? curr_val
end
prospect = immutable_array(new_val, new_mark)
compare_and_set_reference current, prospect
end
alias_method :compare_and_swap, :compare_and_set
# Gets the current reference and marked values.
#
# @return [Array] the current reference and marked values
def get
reference
end
# Gets the current value of the reference
#
# @return [Object] the current value of the reference
def value
reference[0]
end
# Gets the current marked value
#
# @return [Boolean] the current marked value
def mark
reference[1]
end
alias_method :marked?, :mark
# _Unconditionally_ sets to the given value of both the reference and
# the mark.
#
# @param [Object] new_val the new value
# @param [Boolean] new_mark the new mark
#
# @return [Array] both the new value and the new mark
def set(new_val, new_mark)
self.reference = immutable_array(new_val, new_mark)
end
# Pass the current value and marked state to the given block, replacing it
# with the block's results. May retry if the value changes during the
# block's execution.
#
# @yield [Object] Calculate a new value and marked state for the atomic
# reference using given (old) value and (old) marked
# @yieldparam [Object] old_val the starting value of the atomic reference
# @yieldparam [Boolean] old_mark the starting state of marked
#
# @return [Array] the new value and new mark
def update
loop do
old_val, old_mark = reference
new_val, new_mark = yield old_val, old_mark
if compare_and_set old_val, new_val, old_mark, new_mark
return immutable_array(new_val, new_mark)
end
end
end
# Pass the current value to the given block, replacing it
# with the block's result. Raise an exception if the update
# fails.
#
# @yield [Object] Calculate a new value and marked state for the atomic
# reference using given (old) value and (old) marked
# @yieldparam [Object] old_val the starting value of the atomic reference
# @yieldparam [Boolean] old_mark the starting state of marked
#
# @return [Array] the new value and marked state
#
# @raise [Concurrent::ConcurrentUpdateError] if the update fails
def try_update!
old_val, old_mark = reference
new_val, new_mark = yield old_val, old_mark
unless compare_and_set old_val, new_val, old_mark, new_mark
fail ::Concurrent::ConcurrentUpdateError,
'AtomicMarkableReference: Update failed due to race condition. ' +
'Note: If you would like to guarantee an update, please use ' +
'the `AtomicMarkableReference#update` method.'
end
immutable_array(new_val, new_mark)
end
# Pass the current value to the given block, replacing it with the
# block's result. Simply return nil if update fails.
#
# @yield [Object] Calculate a new value and marked state for the atomic
# reference using given (old) value and (old) marked
# @yieldparam [Object] old_val the starting value of the atomic reference
# @yieldparam [Boolean] old_mark the starting state of marked
#
# @return [Array] the new value and marked state, or nil if
# the update failed
def try_update
old_val, old_mark = reference
new_val, new_mark = yield old_val, old_mark
return unless compare_and_set old_val, new_val, old_mark, new_mark
immutable_array(new_val, new_mark)
end
private
def immutable_array(*args)
args.freeze
end
end
end

View File

@ -0,0 +1,204 @@
require 'concurrent/synchronization'
require 'concurrent/utility/engine'
require 'concurrent/atomic_reference/numeric_cas_wrapper'
# Shim for TruffleRuby::AtomicReference
if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference)
# @!visibility private
module TruffleRuby
AtomicReference = Truffle::AtomicReference
end
end
module Concurrent
# Define update methods that use direct paths
#
# @!visibility private
# @!macro internal_implementation_note
module AtomicDirectUpdate
# @!macro atomic_reference_method_update
#
# Pass the current value to the given block, replacing it
# with the block's result. May retry if the value changes
# during the block's execution.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
# @return [Object] the new value
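# @example A minimal sketch
#   ref = Concurrent::AtomicReference.new(0)
#   ref.update { |v| v + 1 } #=> 1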
def update
true until compare_and_set(old_value = get, new_value = yield(old_value))
new_value
end
# @!macro atomic_reference_method_try_update
#
# Pass the current value to the given block, replacing it
# with the block's result. Return nil if the update fails.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
# @note This method was altered to avoid raising an exception by default.
# Instead, this method now returns `nil` in case of failure. For more info,
# please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
# @return [Object] the new value, or nil if update failed
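# @example A minimal sketch
#   ref = Concurrent::AtomicReference.new(0)
#   ref.try_update { |v| v + 1 } #=> 1, or nil if a concurrent update won the race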
def try_update
old_value = get
new_value = yield old_value
return unless compare_and_set old_value, new_value
new_value
end
# @!macro atomic_reference_method_try_update!
#
# Pass the current value to the given block, replacing it
# with the block's result. Raise an exception if the update
# fails.
#
# @yield [Object] Calculate a new value for the atomic reference using
# given (old) value
# @yieldparam [Object] old_value the starting value of the atomic reference
# @note This behavior mimics the behavior of the original
# `AtomicReference#try_update` API. The reason this was changed was to
# avoid raising exceptions (which are inherently slow) by default. For more
# info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336
# @return [Object] the new value
# @raise [Concurrent::ConcurrentUpdateError] if the update fails
def try_update!
old_value = get
new_value = yield old_value
unless compare_and_set(old_value, new_value)
if $VERBOSE
raise ConcurrentUpdateError, "Update failed"
else
raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE
end
end
new_value
end
end
require 'concurrent/atomic_reference/mutex_atomic'
# @!macro atomic_reference
#
# An object reference that may be updated atomically. All read and write
# operations have java volatile semantic.
#
# @!macro thread_safe_variable_comparison
#
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html
#
# @!method initialize(value = nil)
# @!macro atomic_reference_method_initialize
# @param [Object] value The initial value.
#
# @!method get
# @!macro atomic_reference_method_get
# Gets the current value.
# @return [Object] the current value
#
# @!method set(new_value)
# @!macro atomic_reference_method_set
# Sets to the given value.
# @param [Object] new_value the new value
# @return [Object] the new value
#
# @!method get_and_set(new_value)
# @!macro atomic_reference_method_get_and_set
# Atomically sets to the given value and returns the old value.
# @param [Object] new_value the new value
# @return [Object] the old value
#
# @!method compare_and_set(old_value, new_value)
# @!macro atomic_reference_method_compare_and_set
#
# Atomically sets the value to the given updated value if
# the current value == the expected value.
#
# @param [Object] old_value the expected value
# @param [Object] new_value the new value
#
# @return [Boolean] `true` if successful. A `false` return indicates
# that the actual value was not equal to the expected value.
#
# @!method update
# @!macro atomic_reference_method_update
#
# @!method try_update
# @!macro atomic_reference_method_try_update
#
# @!method try_update!
# @!macro atomic_reference_method_try_update!
# @!macro internal_implementation_note
class ConcurrentUpdateError < ThreadError
# frozen pre-allocated backtrace to speed up ConcurrentUpdateError creation
CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze
end
# @!macro internal_implementation_note
AtomicReferenceImplementation = case
when Concurrent.on_cruby? && Concurrent.c_extensions_loaded?
# @!visibility private
# @!macro internal_implementation_note
class CAtomicReference
include AtomicDirectUpdate
include AtomicNumericCompareAndSetWrapper
alias_method :compare_and_swap, :compare_and_set
end
CAtomicReference
when Concurrent.on_jruby?
# @!visibility private
# @!macro internal_implementation_note
class JavaAtomicReference
include AtomicDirectUpdate
end
JavaAtomicReference
when Concurrent.on_truffleruby?
class TruffleRubyAtomicReference < TruffleRuby::AtomicReference
include AtomicDirectUpdate
alias_method :value, :get
alias_method :value=, :set
alias_method :compare_and_swap, :compare_and_set
alias_method :swap, :get_and_set
end
TruffleRubyAtomicReference
when Concurrent.on_rbx?
# @note Extends `Rubinius::AtomicReference` version adding aliases
# and numeric logic.
#
# @!visibility private
# @!macro internal_implementation_note
class RbxAtomicReference < Rubinius::AtomicReference
alias_method :_compare_and_set, :compare_and_set
include AtomicDirectUpdate
include AtomicNumericCompareAndSetWrapper
alias_method :value, :get
alias_method :value=, :set
alias_method :swap, :get_and_set
alias_method :compare_and_swap, :compare_and_set
end
RbxAtomicReference
else
MutexAtomicReference
end
private_constant :AtomicReferenceImplementation
# @!macro atomic_reference
class AtomicReference < AtomicReferenceImplementation
# @return [String] Short string representation.
def to_s
format '%s value:%s>', super[0..-2], get
end
alias_method :inspect, :to_s
end
end

View File

@ -0,0 +1,100 @@
require 'concurrent/atomic/mutex_count_down_latch'
require 'concurrent/atomic/java_count_down_latch'
require 'concurrent/utility/engine'
module Concurrent
###################################################################
# @!macro count_down_latch_method_initialize
#
# Create a new `CountDownLatch` with the initial `count`.
#
# @param [Fixnum] count the initial count
#
# @raise [ArgumentError] if `count` is not an integer or is less than zero
# @!macro count_down_latch_method_wait
#
# Block on the latch until the counter reaches zero or until `timeout` is reached.
#
# @param [Fixnum] timeout the number of seconds to wait for the counter or `nil`
# to block indefinitely
# @return [Boolean] `true` if the `count` reaches zero else false on `timeout`
# @!macro count_down_latch_method_count_down
#
# Signal the latch to decrement the counter. Will signal all blocked threads when
# the `count` reaches zero.
# @!macro count_down_latch_method_count
#
# The current value of the counter.
#
# @return [Fixnum] the current value of the counter
###################################################################
# @!macro count_down_latch_public_api
#
# @!method initialize(count = 1)
# @!macro count_down_latch_method_initialize
#
# @!method wait(timeout = nil)
# @!macro count_down_latch_method_wait
#
# @!method count_down
# @!macro count_down_latch_method_count_down
#
# @!method count
# @!macro count_down_latch_method_count
###################################################################
# @!visibility private
# @!macro internal_implementation_note
CountDownLatchImplementation = case
when Concurrent.on_jruby?
JavaCountDownLatch
else
MutexCountDownLatch
end
private_constant :CountDownLatchImplementation
# @!macro count_down_latch
#
# A synchronization object that allows one thread to wait on multiple other threads.
# The thread that will wait creates a `CountDownLatch` and sets the initial value
# (normally equal to the number of other threads). The initiating thread passes the
# latch to the other threads then waits for the other threads by calling the `#wait`
# method. Each of the other threads calls `#count_down` when done with its work.
# When the latch counter reaches zero the waiting thread is unblocked and continues
# with its work. A `CountDownLatch` can be used only once. Its value cannot be reset.
#
# @!macro count_down_latch_public_api
# @example Waiter and Decrementer
# latch = Concurrent::CountDownLatch.new(3)
#
# waiter = Thread.new do
# latch.wait
# puts "Waiter released"
# end
#
# decrementer = Thread.new do
# sleep(1)
# latch.count_down
# puts latch.count
#
# sleep(1)
# latch.count_down
# puts latch.count
#
# sleep(1)
# latch.count_down
# puts latch.count
# end
#
# [waiter, decrementer].each(&:join)
class CountDownLatch < CountDownLatchImplementation
end
end

View File

@ -0,0 +1,128 @@
require 'concurrent/synchronization'
require 'concurrent/utility/native_integer'
module Concurrent
# A synchronization aid that allows a set of threads to all wait for each
# other to reach a common barrier point.
# @example
# barrier = Concurrent::CyclicBarrier.new(3)
# jobs = Array.new(3) { |i| -> { sleep i; p done: i } }
# process = -> (i) do
# # waiting to start at the same time
# barrier.wait
# # execute job
# jobs[i].call
# # wait for others to finish
# barrier.wait
# end
# threads = 2.times.map do |i|
# Thread.new(i, &process)
# end
#
# # use main as well
# process.call 2
#
# # here we can be sure that all jobs are processed
class CyclicBarrier < Synchronization::LockableObject
# @!visibility private
Generation = Struct.new(:status)
private_constant :Generation
# Create a new `CyclicBarrier` that waits for `parties` threads
#
# @param [Fixnum] parties the number of parties
# @yield an optional block that will be executed after
#   the last thread arrives and before the others are released
#
# @raise [ArgumentError] if `parties` is not an integer or is less than one
def initialize(parties, &block)
Utility::NativeInteger.ensure_integer_and_bounds parties
Utility::NativeInteger.ensure_positive_and_no_zero parties
super(&nil)
synchronize { ns_initialize parties, &block }
end
# @return [Fixnum] the number of threads needed to pass the barrier
def parties
synchronize { @parties }
end
# @return [Fixnum] the number of threads currently waiting on the barrier
def number_waiting
synchronize { @number_waiting }
end
# Blocks on the barrier until the number of waiting threads is equal to
# `parties`, until `timeout` is reached, or until `reset` is called.
# If a block has been passed to the constructor, it will be executed once by
# the last arriving thread before the others are released.
# @param [Fixnum] timeout the number of seconds to wait for the barrier or
#   `nil` to block indefinitely
# @return [Boolean] `true` if the barrier was fulfilled else false on
#   `timeout`, on `reset`, or if the barrier is broken
def wait(timeout = nil)
synchronize do
return false unless @generation.status == :waiting
@number_waiting += 1
if @number_waiting == @parties
@action.call if @action
ns_generation_done @generation, :fulfilled
true
else
generation = @generation
if ns_wait_until(timeout) { generation.status != :waiting }
generation.status == :fulfilled
else
ns_generation_done generation, :broken, false
false
end
end
end
end
# Resets the barrier to its initial state.
# If there is at least one waiting thread, it will be woken up, the `wait`
# method will return false, and the barrier will be broken.
# If the barrier is broken, this method restores it to the original state.
#
# @return [nil]
def reset
synchronize { ns_generation_done @generation, :reset }
end
# A barrier can be broken when:
# - a thread called the `reset` method while at least one other thread was waiting
# - at least one thread timed out on the `wait` method
#
# A broken barrier can be restored using `reset`, but it's safer to create a new one.
# @return [Boolean] true if the barrier is broken otherwise false
def broken?
synchronize { @generation.status != :waiting }
end
protected
def ns_generation_done(generation, status, continue = true)
generation.status = status
ns_next_generation if continue
ns_broadcast
end
def ns_next_generation
@generation = Generation.new(:waiting)
@number_waiting = 0
end
def ns_initialize(parties, &block)
@parties = parties
@action = block
ns_next_generation
end
end
end

View File

@ -0,0 +1,109 @@
require 'thread'
require 'concurrent/synchronization'
module Concurrent
# Old school kernel-style event reminiscent of Win32 programming in C++.
#
# When an `Event` is created it is in the `unset` state. Threads can choose to
# `#wait` on the event, blocking until released by another thread. When one
# thread wants to alert all blocking threads it calls the `#set` method which
# will then wake up all listeners. Once an `Event` has been set it remains set.
# New threads calling `#wait` will return immediately. An `Event` may be
# `#reset` at any time once it has been set.
#
# @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx
# @example
# event = Concurrent::Event.new
#
# t1 = Thread.new do
# puts "t1 is waiting"
# event.wait(1)
# puts "event ocurred"
# end
#
# t2 = Thread.new do
# puts "t2 calling set"
# event.set
# end
#
# [t1, t2].each(&:join)
#
# # prints:
# # t2 calling set
# # t1 is waiting
# # event occurred
class Event < Synchronization::LockableObject
# Creates a new `Event` in the unset state. Threads calling `#wait` on the
# `Event` will block.
def initialize
super
synchronize { ns_initialize }
end
# Is the object in the set state?
#
# @return [Boolean] indicating whether or not the `Event` has been set
def set?
synchronize { @set }
end
# Trigger the event, setting the state to `set` and releasing all threads
# waiting on the event. Has no effect if the `Event` has already been set.
#
# @return [Boolean] should always return `true`
def set
synchronize { ns_set }
end
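# Attempt to set the `Event` if it is not already set: sets the event and
# returns `true`, or returns `false` if it was already set.
#
# @return [Boolean] `true` if this call set the event else `false`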
def try?
synchronize { @set ? false : ns_set }
end
# Reset a previously set event back to the `unset` state.
# Has no effect if the `Event` has not yet been set.
#
# @return [Boolean] should always return `true`
def reset
synchronize do
if @set
@set = false
@iteration += 1
end
true
end
end
# Wait a given number of seconds for the `Event` to be set by another
# thread. Will wait forever when no `timeout` value is given. Returns
# immediately if the `Event` has already been set.
#
# @return [Boolean] true if the `Event` was set before timeout else false
def wait(timeout = nil)
synchronize do
unless @set
iteration = @iteration
ns_wait_until(timeout) { iteration < @iteration || @set }
else
true
end
end
end
protected
def ns_set
unless @set
@set = true
ns_broadcast
end
true
end
def ns_initialize
@set = false
@iteration = 0
end
end
end

View File

@ -0,0 +1,42 @@
if Concurrent.on_jruby?
module Concurrent
# @!macro count_down_latch
# @!visibility private
# @!macro internal_implementation_note
class JavaCountDownLatch
# @!macro count_down_latch_method_initialize
def initialize(count = 1)
Utility::NativeInteger.ensure_integer_and_bounds(count)
Utility::NativeInteger.ensure_positive(count)
@latch = java.util.concurrent.CountDownLatch.new(count)
end
# @!macro count_down_latch_method_wait
def wait(timeout = nil)
result = nil
if timeout.nil?
Synchronization::JRuby.sleep_interruptibly { @latch.await }
result = true
else
Synchronization::JRuby.sleep_interruptibly do
result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
end
end
result
end
# @!macro count_down_latch_method_count_down
def count_down
@latch.countDown
end
# @!macro count_down_latch_method_count
def count
@latch.getCount
end
end
end
end

View File

@ -0,0 +1,37 @@
require 'concurrent/atomic/abstract_thread_local_var'
if Concurrent.on_jruby?
module Concurrent
# @!visibility private
# @!macro internal_implementation_note
class JavaThreadLocalVar < AbstractThreadLocalVar
# @!macro thread_local_var_method_get
def value
value = @var.get
if value.nil?
default
elsif value == NULL
nil
else
value
end
end
# @!macro thread_local_var_method_set
def value=(value)
@var.set(value)
end
protected
# @!visibility private
def allocate_storage
@var = java.lang.ThreadLocal.new
end
end
end
end

View File

@ -0,0 +1,62 @@
require 'concurrent/synchronization'
module Concurrent
# @!macro atomic_boolean
# @!visibility private
# @!macro internal_implementation_note
class MutexAtomicBoolean < Synchronization::LockableObject
# @!macro atomic_boolean_method_initialize
def initialize(initial = false)
super()
synchronize { ns_initialize(initial) }
end
# @!macro atomic_boolean_method_value_get
def value
synchronize { @value }
end
# @!macro atomic_boolean_method_value_set
def value=(value)
synchronize { @value = !!value }
end
# @!macro atomic_boolean_method_true_question
def true?
synchronize { @value }
end
# @!macro atomic_boolean_method_false_question
def false?
synchronize { !@value }
end
# @!macro atomic_boolean_method_make_true
def make_true
synchronize { ns_make_value(true) }
end
# @!macro atomic_boolean_method_make_false
def make_false
synchronize { ns_make_value(false) }
end
protected
# @!visibility private
def ns_initialize(initial)
@value = !!initial
end
private
# @!visibility private
def ns_make_value(value)
old = @value
@value = value
old != @value
end
end
end

View File

@ -0,0 +1,75 @@
require 'concurrent/synchronization'
require 'concurrent/utility/native_integer'
module Concurrent
# @!macro atomic_fixnum
# @!visibility private
# @!macro internal_implementation_note
class MutexAtomicFixnum < Synchronization::LockableObject
# @!macro atomic_fixnum_method_initialize
def initialize(initial = 0)
super()
synchronize { ns_initialize(initial) }
end
# @!macro atomic_fixnum_method_value_get
def value
synchronize { @value }
end
# @!macro atomic_fixnum_method_value_set
def value=(value)
synchronize { ns_set(value) }
end
# @!macro atomic_fixnum_method_increment
def increment(delta = 1)
synchronize { ns_set(@value + delta.to_i) }
end
alias_method :up, :increment
# @!macro atomic_fixnum_method_decrement
def decrement(delta = 1)
synchronize { ns_set(@value - delta.to_i) }
end
alias_method :down, :decrement
# @!macro atomic_fixnum_method_compare_and_set
def compare_and_set(expect, update)
synchronize do
if @value == expect.to_i
@value = update.to_i
true
else
false
end
end
end
# @!macro atomic_fixnum_method_update
def update
synchronize do
@value = yield @value
end
end
protected
# @!visibility private
def ns_initialize(initial)
ns_set(initial)
end
private
# @!visibility private
def ns_set(value)
Utility::NativeInteger.ensure_integer_and_bounds value
@value = value
end
end
end

View File

@ -0,0 +1,44 @@
require 'concurrent/synchronization'
require 'concurrent/utility/native_integer'
module Concurrent
# @!macro count_down_latch
# @!visibility private
# @!macro internal_implementation_note
class MutexCountDownLatch < Synchronization::LockableObject
# @!macro count_down_latch_method_initialize
def initialize(count = 1)
Utility::NativeInteger.ensure_integer_and_bounds count
Utility::NativeInteger.ensure_positive count
super()
synchronize { ns_initialize count }
end
# @!macro count_down_latch_method_wait
def wait(timeout = nil)
synchronize { ns_wait_until(timeout) { @count == 0 } }
end
# @!macro count_down_latch_method_count_down
def count_down
synchronize do
@count -= 1 if @count > 0
ns_broadcast if @count == 0
end
end
# @!macro count_down_latch_method_count
def count
synchronize { @count }
end
protected
def ns_initialize(count)
@count = count
end
end
end

View File

@ -0,0 +1,115 @@
require 'concurrent/synchronization'
require 'concurrent/utility/native_integer'
module Concurrent
# @!macro semaphore
# @!visibility private
# @!macro internal_implementation_note
class MutexSemaphore < Synchronization::LockableObject
# @!macro semaphore_method_initialize
def initialize(count)
Utility::NativeInteger.ensure_integer_and_bounds count
super()
synchronize { ns_initialize count }
end
# @!macro semaphore_method_acquire
def acquire(permits = 1)
Utility::NativeInteger.ensure_integer_and_bounds permits
Utility::NativeInteger.ensure_positive permits
synchronize do
try_acquire_timed(permits, nil)
nil
end
end
# @!macro semaphore_method_available_permits
def available_permits
synchronize { @free }
end
# @!macro semaphore_method_drain_permits
#
# Acquires and returns all permits that are immediately available.
#
# @return [Integer]
def drain_permits
synchronize do
@free.tap { |_| @free = 0 }
end
end
# @!macro semaphore_method_try_acquire
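#
# A hedged sketch of the two modes (`sem` is an illustrative instance):
#   sem.try_acquire(2)      # no timeout: returns true/false immediately
#   sem.try_acquire(2, 0.5) # waits up to half a second for 2 permits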
def try_acquire(permits = 1, timeout = nil)
Utility::NativeInteger.ensure_integer_and_bounds permits
Utility::NativeInteger.ensure_positive permits
synchronize do
if timeout.nil?
try_acquire_now(permits)
else
try_acquire_timed(permits, timeout)
end
end
end
# @!macro semaphore_method_release
def release(permits = 1)
Utility::NativeInteger.ensure_integer_and_bounds permits
Utility::NativeInteger.ensure_positive permits
synchronize do
@free += permits
permits.times { ns_signal }
end
nil
end
# Shrinks the number of available permits by the indicated reduction.
#
# @param [Fixnum] reduction Number of permits to remove.
#
# @raise [ArgumentError] if `reduction` is not an integer or is negative
#
# @raise [ArgumentError] if `@free` - `reduction` is less than zero
#
# @return [nil]
#
# @!visibility private
def reduce_permits(reduction)
Utility::NativeInteger.ensure_integer_and_bounds reduction
Utility::NativeInteger.ensure_positive reduction
synchronize { @free -= reduction }
nil
end
protected
# @!visibility private
def ns_initialize(count)
@free = count
end
private
# @!visibility private
def try_acquire_now(permits)
if @free >= permits
@free -= permits
true
else
false
end
end
# @!visibility private
def try_acquire_timed(permits, timeout)
ns_wait_until(timeout) { try_acquire_now(permits) }
end
end
end

View File

@ -0,0 +1,254 @@
require 'thread'
require 'concurrent/atomic/atomic_fixnum'
require 'concurrent/errors'
require 'concurrent/synchronization'
module Concurrent
# Ruby read-write lock implementation
#
# Allows any number of concurrent readers, but only one concurrent writer
# (And if the "write" lock is taken, any readers who come along will have to wait)
#
# If readers are already active when a writer comes along, the writer will wait for
# all the readers to finish before going ahead.
# Any additional readers that come when the writer is already waiting, will also
# wait (so writers are not starved).
#
# This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`.
#
# @example
# lock = Concurrent::ReadWriteLock.new
# lock.with_read_lock { data.retrieve }
# lock.with_write_lock { data.modify! }
#
# @note Do **not** try to acquire the write lock while already holding a read lock
# **or** try to acquire the write lock while you already have it.
# This will lead to deadlock
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
class ReadWriteLock < Synchronization::Object
# @!visibility private
WAITING_WRITER = 1 << 15
# @!visibility private
RUNNING_WRITER = 1 << 29
# @!visibility private
MAX_READERS = WAITING_WRITER - 1
# @!visibility private
MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1
safe_initialization!
# Implementation notes:
# A goal is to make the uncontended path for both readers/writers lock-free
# Only if there is reader-writer or writer-writer contention, should locks be used
# Internal state is represented by a single integer ("counter"), and updated
# using atomic compare-and-swap operations
# When the counter is 0, the lock is free
# Each reader increments the counter by 1 when acquiring a read lock
# (and decrements by 1 when releasing the read lock)
# The counter is increased by (1 << 15) for each writer waiting to acquire the
# write lock, and by (1 << 29) if the write lock is taken
# Create a new `ReadWriteLock` in the unlocked state.
def initialize
super()
@Counter = AtomicFixnum.new(0) # single integer which represents lock state
@ReadLock = Synchronization::Lock.new
@WriteLock = Synchronization::Lock.new
end
# Execute a block operation within a read lock.
#
# @yield the task to be performed within the lock.
#
# @return [Object] the result of the block operation.
#
# @raise [ArgumentError] when no block is given.
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
# is exceeded.
def with_read_lock
raise ArgumentError.new('no block given') unless block_given?
acquire_read_lock
begin
yield
ensure
release_read_lock
end
end
# Execute a block operation within a write lock.
#
# @yield the task to be performed within the lock.
#
# @return [Object] the result of the block operation.
#
# @raise [ArgumentError] when no block is given.
# @raise [Concurrent::ResourceLimitError] if the maximum number of writers
# is exceeded.
def with_write_lock
raise ArgumentError.new('no block given') unless block_given?
acquire_write_lock
begin
yield
ensure
release_write_lock
end
end
# Acquire a read lock. If a write lock has been acquired will block until
# it is released. Will not block if other read locks have been acquired.
#
# @return [Boolean] true if the lock is successfully acquired
#
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
# is exceeded.
def acquire_read_lock
while true
c = @Counter.value
raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)
# If a writer is waiting when we first queue up, we need to wait
if waiting_writer?(c)
@ReadLock.wait_until { !waiting_writer? }
# after a reader has waited once, they are allowed to "barge" ahead of waiting writers
# but if a writer is *running*, the reader still needs to wait (naturally)
while true
c = @Counter.value
if running_writer?(c)
@ReadLock.wait_until { !running_writer? }
else
return if @Counter.compare_and_set(c, c+1)
end
end
else
break if @Counter.compare_and_set(c, c+1)
end
end
true
end
# Release a previously acquired read lock.
#
# @return [Boolean] true if the lock is successfully released
def release_read_lock
while true
c = @Counter.value
if @Counter.compare_and_set(c, c-1)
# If one or more writers were waiting, and we were the last reader, wake a writer up
if waiting_writer?(c) && running_readers(c) == 1
@WriteLock.signal
end
break
end
end
true
end
# Acquire a write lock. Will block and wait for all active readers and writers.
#
# @return [Boolean] true if the lock is successfully acquired
#
# @raise [Concurrent::ResourceLimitError] if the maximum number of writers
# is exceeded.
def acquire_write_lock
while true
c = @Counter.value
raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)
if c == 0 # no readers OR writers running
# if we successfully swap the RUNNING_WRITER bit on, then we can go ahead
break if @Counter.compare_and_set(0, RUNNING_WRITER)
elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
while true
# Now we have successfully incremented, so no more readers will be able to increment
# (they will wait instead)
# However, readers OR writers could decrement right here, OR another writer could increment
@WriteLock.wait_until do
# So we have to do another check inside the synchronized section
# If a writer OR reader is running, then go to sleep
c = @Counter.value
!running_writer?(c) && !running_readers?(c)
end
# We just came out of a wait
# If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
# Then we are OK to stop waiting and go ahead
# Otherwise go back and wait again
c = @Counter.value
break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
end
break
end
end
true
end
# Release a previously acquired write lock.
#
# @return [Boolean] true if the lock is successfully released
def release_write_lock
return true unless running_writer?
c = @Counter.update { |counter| counter - RUNNING_WRITER }
@ReadLock.broadcast
@WriteLock.signal if waiting_writers(c) > 0
true
end
# Queries if the write lock is held by any thread.
#
# @return [Boolean] true if the write lock is held else false
def write_locked?
@Counter.value >= RUNNING_WRITER
end
# Queries whether any threads are waiting to acquire the read or write lock.
#
# @return [Boolean] true if any threads are waiting for a lock else false
def has_waiters?
waiting_writer?(@Counter.value)
end
private
# @!visibility private
def running_readers(c = @Counter.value)
c & MAX_READERS
end
# @!visibility private
def running_readers?(c = @Counter.value)
(c & MAX_READERS) > 0
end
# @!visibility private
def running_writer?(c = @Counter.value)
c >= RUNNING_WRITER
end
# @!visibility private
def waiting_writers(c = @Counter.value)
(c & MAX_WRITERS) / WAITING_WRITER
end
# @!visibility private
def waiting_writer?(c = @Counter.value)
c >= WAITING_WRITER
end
# @!visibility private
def max_readers?(c = @Counter.value)
(c & MAX_READERS) == MAX_READERS
end
# @!visibility private
def max_writers?(c = @Counter.value)
(c & MAX_WRITERS) == MAX_WRITERS
end
end
end

View File

@ -0,0 +1,379 @@
require 'thread'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/errors'
require 'concurrent/synchronization'
require 'concurrent/atomic/thread_local_var'
module Concurrent
# Re-entrant read-write lock implementation
#
# Allows any number of concurrent readers, but only one concurrent writer
# (And while the "write" lock is taken, no read locks can be obtained either.
# Hence, the write lock can also be called an "exclusive" lock.)
#
# If another thread has taken a read lock, any thread which wants a write lock
# will block until all the readers release their locks. However, once a thread
# starts waiting to obtain a write lock, any additional readers that come along
# will also wait (so writers are not starved).
#
# A thread can acquire both a read and write lock at the same time. A thread can
# also acquire a read lock OR a write lock more than once. Only when the read (or
# write) lock is released as many times as it was acquired, will the thread
# actually let it go, allowing other threads which might have been waiting
# to proceed. Therefore the lock can be upgraded by first acquiring the
# read lock and then the write lock, and the lock can be downgraded by first
# holding both the read and write locks and then releasing just the write lock.
#
# If both read and write locks are acquired by the same thread, it is not strictly
# necessary to release them in the same order they were acquired. In other words,
# the following code is legal:
#
# @example
# lock = Concurrent::ReentrantReadWriteLock.new
# lock.acquire_write_lock
# lock.acquire_read_lock
# lock.release_write_lock
# # At this point, the current thread is holding only a read lock, not a write
# # lock. So other threads can take read locks, but not a write lock.
# lock.release_read_lock
# # Now the current thread is not holding either a read or write lock, so
# # another thread could potentially acquire a write lock.
#
# This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`.
#
# @example
# lock = Concurrent::ReentrantReadWriteLock.new
# lock.with_read_lock { data.retrieve }
# lock.with_write_lock { data.modify! }
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock
class ReentrantReadWriteLock < Synchronization::Object
# Implementation notes:
#
# A goal is to make the uncontended path for both readers/writers mutex-free
# Only if there is reader-writer or writer-writer contention, should mutexes be used
# Otherwise, a single CAS operation is all we need to acquire/release a lock
#
# Internal state is represented by a single integer ("counter"), and updated
# using atomic compare-and-swap operations
# When the counter is 0, the lock is free
# Each thread which has one OR MORE read locks increments the counter by 1
# (and decrements by 1 when releasing the read lock)
# The counter is increased by (1 << 15) for each writer waiting to acquire the
# write lock, and by (1 << 29) if the write lock is taken
#
# Additionally, each thread uses a thread-local variable to count how many times
# it has acquired a read lock, AND how many times it has acquired a write lock.
# It uses a similar trick; an increment of 1 means a read lock was taken, and
# an increment of (1 << 15) means a write lock was taken
# This is what makes re-entrancy possible
#
# 2 rules are followed to ensure good liveness properties:
# 1) Once a writer has queued up and is waiting for a write lock, no other thread
# can take a lock without waiting
# 2) When a write lock is released, readers are given the "first chance" to wake
# up and acquire a read lock
# Following these rules means readers and writers tend to "take turns", so neither
# can starve the other, even under heavy contention
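# As a hedged illustration of the @HeldCount encoding: a value of
# (2 << 15) + 1 means the current thread holds the write lock twice
# (re-entrantly) and one read lock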
# @!visibility private
READER_BITS = 15
# @!visibility private
WRITER_BITS = 14
# Used with @Counter:
# @!visibility private
WAITING_WRITER = 1 << READER_BITS
# @!visibility private
RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS)
# @!visibility private
MAX_READERS = WAITING_WRITER - 1
# @!visibility private
MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1
# Used with @HeldCount:
# @!visibility private
WRITE_LOCK_HELD = 1 << READER_BITS
# @!visibility private
READ_LOCK_MASK = WRITE_LOCK_HELD - 1
# @!visibility private
WRITE_LOCK_MASK = MAX_WRITERS
safe_initialization!
# Create a new `ReentrantReadWriteLock` in the unlocked state.
def initialize
super()
@Counter = AtomicFixnum.new(0) # single integer which represents lock state
@ReadQueue = Synchronization::Lock.new # used to queue waiting readers
@WriteQueue = Synchronization::Lock.new # used to queue waiting writers
@HeldCount = ThreadLocalVar.new(0) # indicates # of R & W locks held by this thread
end
# Execute a block operation within a read lock.
#
# @yield the task to be performed within the lock.
#
# @return [Object] the result of the block operation.
#
# @raise [ArgumentError] when no block is given.
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
# is exceeded.
def with_read_lock
raise ArgumentError.new('no block given') unless block_given?
acquire_read_lock
begin
yield
ensure
release_read_lock
end
end
# Execute a block operation within a write lock.
#
# @yield the task to be performed within the lock.
#
# @return [Object] the result of the block operation.
#
# @raise [ArgumentError] when no block is given.
# @raise [Concurrent::ResourceLimitError] if the maximum number of writers
# is exceeded.
def with_write_lock
raise ArgumentError.new('no block given') unless block_given?
acquire_write_lock
begin
yield
ensure
release_write_lock
end
end
# Acquire a read lock. If a write lock is held by another thread, will block
# until it is released.
#
# @return [Boolean] true if the lock is successfully acquired
#
# @raise [Concurrent::ResourceLimitError] if the maximum number of readers
# is exceeded.
def acquire_read_lock
if (held = @HeldCount.value) > 0
# If we already have a lock, there's no need to wait
if held & READ_LOCK_MASK == 0
# But we do need to update the counter, if we were holding a write
# lock but not a read lock
@Counter.update { |c| c + 1 }
end
@HeldCount.value = held + 1
return true
end
while true
c = @Counter.value
raise ResourceLimitError.new('Too many reader threads') if max_readers?(c)
# If a writer is waiting OR running when we first queue up, we need to wait
if waiting_or_running_writer?(c)
# Before going to sleep, check again with the ReadQueue mutex held
@ReadQueue.synchronize do
@ReadQueue.ns_wait if waiting_or_running_writer?
end
# Note: the above 'synchronize' block could have used #wait_until,
# but that waits repeatedly in a loop, checking the wait condition
# each time it wakes up (to protect against spurious wakeups)
# But we are already in a loop, which is only broken when we successfully
# acquire the lock! So we don't care about spurious wakeups, and would
# rather not pay the extra overhead of using #wait_until
# After a reader has waited once, they are allowed to "barge" ahead of waiting writers
# But if a writer is *running*, the reader still needs to wait (naturally)
while true
c = @Counter.value
if running_writer?(c)
@ReadQueue.synchronize do
@ReadQueue.ns_wait if running_writer?
end
elsif @Counter.compare_and_set(c, c+1)
@HeldCount.value = held + 1
return true
end
end
elsif @Counter.compare_and_set(c, c+1)
@HeldCount.value = held + 1
return true
end
end
end
# Try to acquire a read lock and return true if we succeed. If it cannot be
# acquired immediately, return false.
#
# @return [Boolean] true if the lock is successfully acquired
def try_read_lock
if (held = @HeldCount.value) > 0
if held & READ_LOCK_MASK == 0
# If we hold a write lock, but not a read lock...
@Counter.update { |c| c + 1 }
end
@HeldCount.value = held + 1
return true
else
c = @Counter.value
if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1)
@HeldCount.value = held + 1
return true
end
end
false
end
# Release a previously acquired read lock.
#
# @return [Boolean] true if the lock is successfully released
def release_read_lock
held = @HeldCount.value = @HeldCount.value - 1
rlocks_held = held & READ_LOCK_MASK
if rlocks_held == 0
c = @Counter.update { |counter| counter - 1 }
# If one or more writers were waiting, and we were the last reader, wake a writer up
if waiting_or_running_writer?(c) && running_readers(c) == 0
@WriteQueue.signal
end
elsif rlocks_held == READ_LOCK_MASK
raise IllegalOperationError, "Cannot release a read lock which is not held"
end
true
end
# Acquire a write lock. Will block and wait for all active readers and writers.
#
# @return [Boolean] true if the lock is successfully acquired
#
# @raise [Concurrent::ResourceLimitError] if the maximum number of writers
# is exceeded.
def acquire_write_lock
if (held = @HeldCount.value) >= WRITE_LOCK_HELD
# if we already have a write (exclusive) lock, there's no need to wait
@HeldCount.value = held + WRITE_LOCK_HELD
return true
end
while true
c = @Counter.value
raise ResourceLimitError.new('Too many writer threads') if max_writers?(c)
# To go ahead and take the lock without waiting, there must be no writer
# running right now, AND no writers who came before us still waiting to
# acquire the lock
# Additionally, if any read locks have been taken, we must hold all of them
if c == held
# If we successfully swap the RUNNING_WRITER bit on, then we can go ahead
if @Counter.compare_and_set(c, c+RUNNING_WRITER)
@HeldCount.value = held + WRITE_LOCK_HELD
return true
end
elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
while true
# Now we have successfully incremented, so no more readers will be able to increment
# (they will wait instead)
# However, readers OR writers could decrement right here
@WriteQueue.synchronize do
# So we have to do another check inside the synchronized section
# If a writer OR another reader is running, then go to sleep
c = @Counter.value
@WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held
end
# Note: if you are thinking of replacing the above 'synchronize' block
# with #wait_until, read the comment in #acquire_read_lock first!
# We just came out of a wait
# If we successfully turn the RUNNING_WRITER bit on with an atomic swap,
# then we are OK to stop waiting and go ahead
# Otherwise go back and wait again
c = @Counter.value
if !running_writer?(c) &&
running_readers(c) == held &&
@Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER)
@HeldCount.value = held + WRITE_LOCK_HELD
return true
end
end
end
end
end
# Try to acquire a write lock and return true if we succeed. If it cannot be
# acquired immediately, return false.
#
# @return [Boolean] true if the lock is successfully acquired
def try_write_lock
if (held = @HeldCount.value) >= WRITE_LOCK_HELD
@HeldCount.value = held + WRITE_LOCK_HELD
return true
else
c = @Counter.value
if !waiting_or_running_writer?(c) &&
running_readers(c) == held &&
@Counter.compare_and_set(c, c+RUNNING_WRITER)
@HeldCount.value = held + WRITE_LOCK_HELD
return true
end
end
false
end
# Release a previously acquired write lock.
#
# @return [Boolean] true if the lock is successfully released
def release_write_lock
held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD
wlocks_held = held & WRITE_LOCK_MASK
if wlocks_held == 0
c = @Counter.update { |counter| counter - RUNNING_WRITER }
@ReadQueue.broadcast
@WriteQueue.signal if waiting_writers(c) > 0
elsif wlocks_held == WRITE_LOCK_MASK
raise IllegalOperationError, "Cannot release a write lock which is not held"
end
true
end
private
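# The helpers below decode the packed @Counter bit fields. A worked sketch,
# assuming the upstream layout (the low READER_BITS bits count running readers,
# the next bits count waiting writers, and the top bit flags a running writer):
#
#   c = 2 | (1 << READER_BITS) # two running readers, one waiting writer
#   running_readers(c)         #=> 2
#   waiting_writers(c)         #=> 1
#   running_writer?(c)         #=> false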
# @!visibility private
def running_readers(c = @Counter.value)
c & MAX_READERS
end
# @!visibility private
def running_readers?(c = @Counter.value)
(c & MAX_READERS) > 0
end
# @!visibility private
def running_writer?(c = @Counter.value)
c >= RUNNING_WRITER
end
# @!visibility private
def waiting_writers(c = @Counter.value)
(c & MAX_WRITERS) >> READER_BITS
end
# @!visibility private
def waiting_or_running_writer?(c = @Counter.value)
c >= WAITING_WRITER
end
# @!visibility private
def max_readers?(c = @Counter.value)
(c & MAX_READERS) == MAX_READERS
end
# @!visibility private
def max_writers?(c = @Counter.value)
(c & MAX_WRITERS) == MAX_WRITERS
end
end
end

View File

@ -0,0 +1,161 @@
require 'thread'
require 'concurrent/atomic/abstract_thread_local_var'
module Concurrent
# @!visibility private
# @!macro internal_implementation_note
class RubyThreadLocalVar < AbstractThreadLocalVar
# Each thread has a (lazily initialized) array of thread-local variable values
# Each time a new thread-local var is created, we allocate an "index" for it
# For example, if the allocated index is 1, that means slot #1 in EVERY
# thread's thread-local array will be used for the value of that TLV
#
# The good thing about using a per-THREAD structure to hold values, rather
# than a per-TLV structure, is that no synchronization is needed when
# reading and writing those values (since the structure is only ever
# accessed by a single thread)
#
# Of course, when a TLV is GC'd, 1) we need to recover its index for use
# by other new TLVs (otherwise the thread-local arrays could get bigger
# and bigger with time), and 2) we need to null out all the references
# held in the now-unused slots (both to avoid blocking GC of those objects,
# and also to prevent "stale" values from being passed on to a new TLV
# when the index is reused)
# Because we need to null out freed slots, we need to keep references to
# ALL the thread-local arrays -- ARRAYS is for that
# But when a Thread is GC'd, we need to drop the reference to its thread-local
# array, so we don't leak memory
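# An illustrative sketch (the concrete indices are assumptions): if TLV A is
# allocated index 0 and TLV B index 1, each thread that touches them ends up
# with a per-thread array like [value_of_A, value_of_B]; when B is GC'd,
# index 1 is pushed onto FREE and slot 1 is nil'd in every array, so a later
# TLV C can safely reuse that slot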
# @!visibility private
FREE = []
LOCK = Mutex.new
ARRAYS = {} # used as a hash set
@@next = 0
private_constant :FREE, :LOCK, :ARRAYS
# @!macro thread_local_var_method_get
def value
if array = get_threadlocal_array
value = array[@index]
if value.nil?
default
elsif value.equal?(NULL)
nil
else
value
end
else
default
end
end
# @!macro thread_local_var_method_set
def value=(value)
me = Thread.current
# We could keep the thread-local arrays in a hash, keyed by Thread
# But why? That would require locking
# Using Ruby's built-in thread-local storage is faster
unless array = get_threadlocal_array(me)
array = set_threadlocal_array([], me)
LOCK.synchronize { ARRAYS[array.object_id] = array }
ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array))
end
array[@index] = (value.nil? ? NULL : value)
value
end
protected
# @!visibility private
def allocate_storage
@index = LOCK.synchronize do
FREE.pop || begin
result = @@next
@@next += 1
result
end
end
ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index))
end
# @!visibility private
def self.threadlocal_finalizer(index)
proc do
Thread.new do # avoid error: can't be called from trap context
LOCK.synchronize do
FREE.push(index)
# The cost of GC'ing a TLV is linear in the number of threads using TLVs
# But that is natural! More threads means more storage is used per TLV
# So naturally more CPU time is required to free more storage
ARRAYS.each_value do |array|
array[index] = nil
end
end
end
end
end
# @!visibility private
def self.thread_finalizer(array)
proc do
Thread.new do # avoid error: can't be called from trap context
LOCK.synchronize do
# The thread which used this thread-local array is now gone
# So don't hold onto a reference to the array (thus blocking GC)
ARRAYS.delete(array.object_id)
end
end
end
end
private
if Thread.instance_methods.include?(:thread_variable_get)
def get_threadlocal_array(thread = Thread.current)
thread.thread_variable_get(:__threadlocal_array__)
end
def set_threadlocal_array(array, thread = Thread.current)
thread.thread_variable_set(:__threadlocal_array__, array)
end
else
def get_threadlocal_array(thread = Thread.current)
thread[:__threadlocal_array__]
end
def set_threadlocal_array(array, thread = Thread.current)
thread[:__threadlocal_array__] = array
end
end
# This exists only for use in testing
# @!visibility private
def value_for(thread)
if array = get_threadlocal_array(thread)
value = array[@index]
if value.nil?
default_for(thread)
elsif value.equal?(NULL)
nil
else
value
end
else
default_for(thread)
end
end
def default_for(thread)
if @default_block
raise "Cannot use default_for with default block"
else
@default
end
end
end
end

View File

@ -0,0 +1,145 @@
require 'concurrent/atomic/mutex_semaphore'
require 'concurrent/synchronization'
module Concurrent
###################################################################
# @!macro semaphore_method_initialize
#
# Create a new `Semaphore` with the initial `count`.
#
# @param [Fixnum] count the initial count
#
# @raise [ArgumentError] if `count` is not an integer or is less than zero
# @!macro semaphore_method_acquire
#
# Acquires the given number of permits from this semaphore,
# blocking until all are available.
#
# @param [Fixnum] permits Number of permits to acquire
#
# @raise [ArgumentError] if `permits` is not an integer or is less than
# one
#
# @return [nil]
# @!macro semaphore_method_available_permits
#
# Returns the current number of permits available in this semaphore.
#
# @return [Integer]
# @!macro semaphore_method_drain_permits
#
# Acquires and returns all permits that are immediately available.
#
# @return [Integer]
# @!macro semaphore_method_try_acquire
#
# Acquires the given number of permits from this semaphore,
# only if all are available at the time of invocation or within
# `timeout` interval
#
# @param [Fixnum] permits the number of permits to acquire
#
# @param [Fixnum] timeout the number of seconds to wait for the counter
# or `nil` to return immediately
#
# @raise [ArgumentError] if `permits` is not an integer or is less than
# one
#
# @return [Boolean] `false` if no permits are available, `true` when
# acquired a permit
# @!macro semaphore_method_release
#
# Releases the given number of permits, returning them to the semaphore.
#
# @param [Fixnum] permits Number of permits to return to the semaphore.
#
# @raise [ArgumentError] if `permits` is not a number or is less than one
#
# @return [nil]
###################################################################
# @!macro semaphore_public_api
#
# @!method initialize(count)
# @!macro semaphore_method_initialize
#
# @!method acquire(permits = 1)
# @!macro semaphore_method_acquire
#
# @!method available_permits
# @!macro semaphore_method_available_permits
#
# @!method drain_permits
# @!macro semaphore_method_drain_permits
#
# @!method try_acquire(permits = 1, timeout = nil)
# @!macro semaphore_method_try_acquire
#
# @!method release(permits = 1)
# @!macro semaphore_method_release
###################################################################
# @!visibility private
# @!macro internal_implementation_note
SemaphoreImplementation = case
when defined?(JavaSemaphore)
JavaSemaphore
else
MutexSemaphore
end
private_constant :SemaphoreImplementation
# @!macro semaphore
#
# A counting semaphore. Conceptually, a semaphore maintains a set of
# permits. Each {#acquire} blocks if necessary until a permit is
# available, and then takes it. Each {#release} adds a permit, potentially
# releasing a blocking acquirer.
# However, no actual permit objects are used; the Semaphore just keeps a
# count of the number available and acts accordingly.
#
# @!macro semaphore_public_api
# @example
# semaphore = Concurrent::Semaphore.new(2)
#
# t1 = Thread.new do
# semaphore.acquire
# puts "Thread 1 acquired semaphore"
# end
#
# t2 = Thread.new do
# semaphore.acquire
# puts "Thread 2 acquired semaphore"
# end
#
# t3 = Thread.new do
# semaphore.acquire
# puts "Thread 3 acquired semaphore"
# end
#
# t4 = Thread.new do
# sleep(2)
# puts "Thread 4 releasing semaphore"
# semaphore.release
# end
#
# [t1, t2, t3, t4].each(&:join)
#
# # prints:
# # Thread 3 acquired semaphore
# # Thread 2 acquired semaphore
# # Thread 4 releasing semaphore
# # Thread 1 acquired semaphore
#
class Semaphore < SemaphoreImplementation
end
end

View File

@ -0,0 +1,104 @@
require 'concurrent/atomic/ruby_thread_local_var'
require 'concurrent/atomic/java_thread_local_var'
require 'concurrent/utility/engine'
module Concurrent
###################################################################
# @!macro thread_local_var_method_initialize
#
# Creates a thread local variable.
#
# @param [Object] default the default value when otherwise unset
# @param [Proc] default_block Optional block that gets called to obtain the
# default value for each thread
# @!macro thread_local_var_method_get
#
# Returns the value in the current thread's copy of this thread-local variable.
#
# @return [Object] the current value
# @!macro thread_local_var_method_set
#
# Sets the current thread's copy of this thread-local variable to the specified value.
#
# @param [Object] value the value to set
# @return [Object] the new value
# @!macro thread_local_var_method_bind
#
# Bind the given value to thread local storage during
# execution of the given block.
#
# @param [Object] value the value to bind
# @yield the operation to be performed with the bound variable
# @return [Object] the value
###################################################################
# @!macro thread_local_var_public_api
#
# @!method initialize(default = nil, &default_block)
# @!macro thread_local_var_method_initialize
#
# @!method value
# @!macro thread_local_var_method_get
#
# @!method value=(value)
# @!macro thread_local_var_method_set
#
# @!method bind(value, &block)
# @!macro thread_local_var_method_bind
###################################################################
# @!visibility private
# @!macro internal_implementation_note
ThreadLocalVarImplementation = case
when Concurrent.on_jruby?
JavaThreadLocalVar
else
RubyThreadLocalVar
end
private_constant :ThreadLocalVarImplementation
# @!macro thread_local_var
#
# A `ThreadLocalVar` is a variable where the value is different for each thread.
# Each variable may have a default value, but when you modify the variable only
# the current thread will ever see that change.
#
# @!macro thread_safe_variable_comparison
#
# @example
# v = ThreadLocalVar.new(14)
# v.value #=> 14
# v.value = 2
# v.value #=> 2
#
# @example
# v = ThreadLocalVar.new(14)
#
# t1 = Thread.new do
# v.value #=> 14
# v.value = 1
# v.value #=> 1
# end
#
# t2 = Thread.new do
# v.value #=> 14
# v.value = 2
# v.value #=> 2
# end
#
# v.value #=> 14
#
# @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal
#
# @!macro thread_local_var_public_api
class ThreadLocalVar < ThreadLocalVarImplementation
end
end

View File

@ -0,0 +1,56 @@
module Concurrent
# @!visibility private
# @!macro internal_implementation_note
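# A brief usage sketch (this backend is normally reached through the public
# Concurrent::AtomicReference facade rather than instantiated directly):
#
#   ref = Concurrent::MutexAtomicReference.new(:a)
#   ref.get_and_set(:b)         #=> :a
#   ref.compare_and_set(:b, :c) #=> true
#   ref.value                   #=> :c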
class MutexAtomicReference < Synchronization::LockableObject
include AtomicDirectUpdate
include AtomicNumericCompareAndSetWrapper
alias_method :compare_and_swap, :compare_and_set
# @!macro atomic_reference_method_initialize
def initialize(value = nil)
super()
synchronize { ns_initialize(value) }
end
# @!macro atomic_reference_method_get
def get
synchronize { @value }
end
alias_method :value, :get
# @!macro atomic_reference_method_set
def set(new_value)
synchronize { @value = new_value }
end
alias_method :value=, :set
# @!macro atomic_reference_method_get_and_set
def get_and_set(new_value)
synchronize do
old_value = @value
@value = new_value
old_value
end
end
alias_method :swap, :get_and_set
# @!macro atomic_reference_method_compare_and_set
def _compare_and_set(old_value, new_value)
synchronize do
if @value.equal? old_value
@value = new_value
true
else
false
end
end
end
protected
def ns_initialize(value)
@value = value
end
end
end

View File

@ -0,0 +1,28 @@
module Concurrent
# Special "compare and set" handling of numeric values.
#
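# A hedged sketch of why this wrapper exists: Floats (and large Integers) that
# are == may still be distinct objects, so a purely identity-based CAS could
# spuriously fail; the retry loop below falls back to value equality:
#
#   ref = Concurrent::AtomicReference.new(10.0)
#   ref.compare_and_set(10.0, 20.0) #=> true, even if the two 10.0s differ
#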
# @!visibility private
# @!macro internal_implementation_note
module AtomicNumericCompareAndSetWrapper
# @!macro atomic_reference_method_compare_and_set
def compare_and_set(old_value, new_value)
if old_value.kind_of? Numeric
while true
old = get
return false unless old.kind_of? Numeric
return false unless old == old_value
result = _compare_and_set(old, new_value)
return result if result
end
else
_compare_and_set(old_value, new_value)
end
end
end
end

View File

@ -0,0 +1,10 @@
require 'concurrent/atomic/atomic_reference'
require 'concurrent/atomic/atomic_boolean'
require 'concurrent/atomic/atomic_fixnum'
require 'concurrent/atomic/cyclic_barrier'
require 'concurrent/atomic/count_down_latch'
require 'concurrent/atomic/event'
require 'concurrent/atomic/read_write_lock'
require 'concurrent/atomic/reentrant_read_write_lock'
require 'concurrent/atomic/semaphore'
require 'concurrent/atomic/thread_local_var'

View File

@ -0,0 +1,107 @@
require 'concurrent/synchronization'
module Concurrent
module Collection
# A thread safe observer set implemented using copy-on-read approach:
# observers are added and removed from a thread safe collection; every time
# a notification is required the internal data structure is copied to
# prevent concurrency issues
#
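# @example A minimal usage sketch
#   set = Concurrent::Collection::CopyOnNotifyObserverSet.new
#   set.add_observer { |*args| puts "notified: #{args.inspect}" }
#   set.notify_observers(:ping) # copies the observers, then calls each one
#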
# @api private
class CopyOnNotifyObserverSet < Synchronization::LockableObject
def initialize
super()
synchronize { ns_initialize }
end
# @!macro observable_add_observer
def add_observer(observer = nil, func = :update, &block)
if observer.nil? && block.nil?
raise ArgumentError, 'should pass observer as a first argument or block'
elsif observer && block
raise ArgumentError.new('cannot provide both an observer and a block')
end
if block
observer = block
func = :call
end
synchronize do
@observers[observer] = func
observer
end
end
# @!macro observable_delete_observer
def delete_observer(observer)
synchronize do
@observers.delete(observer)
observer
end
end
# @!macro observable_delete_observers
def delete_observers
synchronize do
@observers.clear
self
end
end
# @!macro observable_count_observers
def count_observers
synchronize { @observers.count }
end
# Notifies all registered observers with optional args
# @param [Object] args arguments to be passed to each observer
# @return [CopyOnNotifyObserverSet] self
def notify_observers(*args, &block)
observers = duplicate_observers
notify_to(observers, *args, &block)
self
end
# Notifies all registered observers with optional args and deletes them.
#
# @param [Object] args arguments to be passed to each observer
# @return [CopyOnNotifyObserverSet] self
def notify_and_delete_observers(*args, &block)
observers = duplicate_and_clear_observers
notify_to(observers, *args, &block)
self
end
protected
def ns_initialize
@observers = {}
end
private
def duplicate_and_clear_observers
synchronize do
observers = @observers.dup
@observers.clear
observers
end
end
def duplicate_observers
synchronize { @observers.dup }
end
def notify_to(observers, *args)
raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty?
observers.each do |observer, function|
args = yield if block_given?
observer.send(function, *args)
end
end
end
end
end

View File

@ -0,0 +1,111 @@
require 'concurrent/synchronization'
module Concurrent
module Collection
# A thread safe observer set implemented using copy-on-write approach:
# every time an observer is added or removed the whole internal data structure is
# duplicated and replaced with a new one.
#
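# @example A minimal usage sketch
#   set = Concurrent::Collection::CopyOnWriteObserverSet.new
#   set.add_observer { |event| puts event } # duplicates the internal hash
#   set.notify_observers(:ready)            # reads it without copying
#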
# @api private
class CopyOnWriteObserverSet < Synchronization::LockableObject
def initialize
super()
synchronize { ns_initialize }
end
# @!macro observable_add_observer
def add_observer(observer = nil, func = :update, &block)
if observer.nil? && block.nil?
raise ArgumentError, 'should pass observer as a first argument or block'
elsif observer && block
raise ArgumentError.new('cannot provide both an observer and a block')
end
if block
observer = block
func = :call
end
synchronize do
new_observers = @observers.dup
new_observers[observer] = func
@observers = new_observers
observer
end
end
# @!macro observable_delete_observer
def delete_observer(observer)
synchronize do
new_observers = @observers.dup
new_observers.delete(observer)
@observers = new_observers
observer
end
end
# @!macro observable_delete_observers
def delete_observers
self.observers = {}
self
end
# @!macro observable_count_observers
def count_observers
observers.count
end
# Notifies all registered observers with optional args
# @param [Object] args arguments to be passed to each observer
# @return [CopyOnWriteObserverSet] self
def notify_observers(*args, &block)
notify_to(observers, *args, &block)
self
end
# Notifies all registered observers with optional args and deletes them.
#
# @param [Object] args arguments to be passed to each observer
# @return [CopyOnWriteObserverSet] self
def notify_and_delete_observers(*args, &block)
old = clear_observers_and_return_old
notify_to(old, *args, &block)
self
end
protected
def ns_initialize
@observers = {}
end
private
def notify_to(observers, *args)
raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty?
observers.each do |observer, function|
args = yield if block_given?
observer.send(function, *args)
end
end
def observers
synchronize { @observers }
end
def observers=(new_set)
synchronize { @observers = new_set }
end
def clear_observers_and_return_old
synchronize do
old_observers = @observers
@observers = {}
old_observers
end
end
end
end
end

View File

@ -0,0 +1,84 @@
if Concurrent.on_jruby?
module Concurrent
module Collection
# @!macro priority_queue
#
# @!visibility private
# @!macro internal_implementation_note
class JavaNonConcurrentPriorityQueue
# @!macro priority_queue_method_initialize
def initialize(opts = {})
order = opts.fetch(:order, :max)
if [:min, :low].include?(order)
@queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity
else
@queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder())
end
end
# @!macro priority_queue_method_clear
def clear
@queue.clear
true
end
# @!macro priority_queue_method_delete
def delete(item)
found = false
while @queue.remove(item) do
found = true
end
found
end
# @!macro priority_queue_method_empty
def empty?
@queue.size == 0
end
# @!macro priority_queue_method_include
def include?(item)
@queue.contains(item)
end
alias_method :has_priority?, :include?
# @!macro priority_queue_method_length
def length
@queue.size
end
alias_method :size, :length
# @!macro priority_queue_method_peek
def peek
@queue.peek
end
# @!macro priority_queue_method_pop
def pop
@queue.poll
end
alias_method :deq, :pop
alias_method :shift, :pop
# @!macro priority_queue_method_push
def push(item)
raise ArgumentError.new('cannot enqueue nil') if item.nil?
@queue.add(item)
end
alias_method :<<, :push
alias_method :enq, :push
# @!macro priority_queue_method_from_list
def self.from_list(list, opts = {})
queue = new(opts)
list.each{|item| queue << item }
queue
end
end
end
end
end

View File

@ -0,0 +1,158 @@
module Concurrent
# @!macro warn.edge
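# @example A minimal sketch of the lock-free API
#   stack = Concurrent::LockFreeStack.new
#   stack.push(1).push(2)
#   stack.pop                   #=> 2
#   head = stack.peek           # a Node, not a value
#   stack.compare_and_pop(head) # succeeds only if head is still current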
class LockFreeStack < Synchronization::Object
safe_initialization!
class Node
# TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class?
# @return [Node]
attr_reader :next_node
# @return [Object]
attr_reader :value
# @!visibility private
# allows the value to be nil'd so it can be GC'd once the entry is no longer relevant; not synchronised
attr_writer :value
def initialize(value, next_node)
@value = value
@next_node = next_node
end
singleton_class.send :alias_method, :[], :new
end
# The singleton for empty node
EMPTY = Node[nil, nil]
def EMPTY.next_node
self
end
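# EMPTY points to itself, so walking next_node off the end of the stack stays
# at EMPTY, and #pop on an empty stack simply returns EMPTY's nil value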
attr_atomic(:head)
private :head, :head=, :swap_head, :compare_and_set_head, :update_head
# @!visibility private
def self.of1(value)
new Node[value, EMPTY]
end
# @!visibility private
def self.of2(value1, value2)
new Node[value1, Node[value2, EMPTY]]
end
# @param [Node] head
def initialize(head = EMPTY)
super()
self.head = head
end
# @param [Node] head
# @return [true, false]
def empty?(head = self.head)
head.equal? EMPTY
end
# @param [Node] head
# @param [Object] value
# @return [true, false]
def compare_and_push(head, value)
compare_and_set_head head, Node[value, head]
end
# @param [Object] value
# @return [self]
def push(value)
while true
current_head = head
return self if compare_and_set_head current_head, Node[value, current_head]
end
end
# @return [Node]
def peek
head
end
# @param [Node] head
# @return [true, false]
def compare_and_pop(head)
compare_and_set_head head, head.next_node
end
# @return [Object]
def pop
while true
current_head = head
return current_head.value if compare_and_set_head current_head, current_head.next_node
end
end
# @param [Node] head
# @return [true, false]
def compare_and_clear(head)
compare_and_set_head head, EMPTY
end
include Enumerable
# @param [Node] head
# @return [self]
def each(head = nil)
return to_enum(:each, head) unless block_given?
it = head || peek
until it.equal?(EMPTY)
yield it.value
it = it.next_node
end
self
end
# @return [true, false]
def clear
while true
current_head = head
return false if current_head == EMPTY
return true if compare_and_set_head current_head, EMPTY
end
end
# @param [Node] head
# @return [true, false]
def clear_if(head)
compare_and_set_head head, EMPTY
end
# @param [Node] head
# @param [Node] new_head
# @return [true, false]
def replace_if(head, new_head)
compare_and_set_head head, new_head
end
# @return [self]
# @yield over the cleared stack
# @yieldparam [Object] value
def clear_each(&block)
while true
current_head = head
return self if current_head == EMPTY
if compare_and_set_head current_head, EMPTY
each current_head, &block
return self
end
end
end
# @return [String] Short string representation.
def to_s
format '%s %s>', super[0..-2], to_a.to_s
end
alias_method :inspect, :to_s
end
end

View File

@ -0,0 +1,927 @@
require 'concurrent/constants'
require 'concurrent/thread_safe/util'
require 'concurrent/thread_safe/util/adder'
require 'concurrent/thread_safe/util/cheap_lockable'
require 'concurrent/thread_safe/util/power_of_two_tuple'
require 'concurrent/thread_safe/util/volatile'
require 'concurrent/thread_safe/util/xor_shift_random'
module Concurrent
# @!visibility private
module Collection
# A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59
# available in public domain.
#
# Original source code available here:
# http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59
#
# The Ruby port leaves out the +TreeBin+ (red-black trees used in bins whose
# size exceeds a threshold).
#
# A hash table supporting full concurrency of retrievals and high expected
# concurrency for updates. However, even though all operations are
# thread-safe, retrieval operations do _not_ entail locking, and there is
# _not_ any support for locking the entire table in a way that prevents all
# access.
#
# Retrieval operations generally do not block, so may overlap with update
# operations. Retrievals reflect the results of the most recently _completed_
# update operations holding upon their onset. (More formally, an update
# operation for a given key bears a _happens-before_ relation with any (non
# +nil+) retrieval for that key reporting the updated value.) For aggregate
# operations such as +clear()+, concurrent retrievals may reflect insertion or
# removal of only some entries. Similarly, the +each_pair+ iterator yields
# elements reflecting the state of the hash table at some point at or since
# the start of the +each_pair+. Bear in mind that the results of aggregate
# status methods including +size()+ and +empty?+ are typically useful only
# when a map is not undergoing concurrent updates in other threads. Otherwise
# the results of these methods reflect transient states that may be adequate
# for monitoring or estimation purposes, but not for program control.
#
# The table is dynamically expanded when there are too many collisions (i.e.,
# keys that have distinct hash codes but fall into the same slot modulo the
# table size), with the expected average effect of maintaining roughly two
# bins per mapping (corresponding to a 0.75 load factor threshold for
# resizing). There may be much variance around this average as mappings are
# added and removed, but overall, this maintains a commonly accepted
# time/space tradeoff for hash tables. However, resizing this or any other
# kind of hash table may be a relatively slow operation. When possible, it is
# a good idea to provide a size estimate as an optional :initial_capacity
# initializer argument. An additional optional :load_factor constructor
# argument provides a further means of customizing initial table capacity by
# specifying the table density to be used in calculating the amount of space
# to allocate for the given number of elements. Note that using many keys with
# exactly the same +hash+ is a sure way to slow down performance of any hash
# table.
#
# ## Design overview
#
# The primary design goal of this hash table is to maintain concurrent
# readability (typically method +[]+, but also iteration and related methods)
# while minimizing update contention. Secondary goals are to keep space
# consumption about the same or better than plain +Hash+, and to support high
# initial insertion rates on an empty table by many threads.
#
# Each key-value mapping is held in a +Node+. The validation-based approach
# explained below leads to a lot of code sprawl because retry-control
# precludes factoring into smaller methods.
#
# The table is lazily initialized to a power-of-two size upon the first
# insertion. Each bin in the table normally contains a list of +Node+s (most
# often, the list has only zero or one +Node+). Table accesses require
# volatile/atomic reads, writes, and CASes. The lists of nodes within bins are
# always accurately traversable under volatile reads, so long as lookups check
# hash code and non-nullness of value before checking key equality.
#
# We use the top two bits of +Node+ hash fields for control purposes -- they
# are available anyway because of addressing constraints. As explained further
# below, these top bits are used as follows:
#
# - 00 - Normal
# - 01 - Locked
# - 11 - Locked and may have a thread waiting for lock
# - 10 - +Node+ is a forwarding node
#
# The lower 28 bits of each +Node+'s hash field contain the key's hash code,
# except for forwarding nodes, for which the lower bits are zero (and so
# always have hash field == +MOVED+).
#
# Insertion (via +[]=+ or its variants) of the first node in an empty bin is
# performed by just CASing it to the bin. This is by far the most common case
# for put operations under most key/hash distributions. Other update
# operations (insert, delete, and replace) require locks. We do not want to
# waste the space required to associate a distinct lock object with each bin,
# so instead use the first node of a bin list itself as a lock. Blocking
# support for these locks relies on +Concurrent::ThreadSafe::Util::CheapLockable+. However, we also need a
# +try_lock+ construction, so we overlay these by using bits of the +Node+
# hash field for lock control (see above), and so normally use builtin
# monitors only for blocking and signalling using
# +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+.
#
# Using the first node of a list as a lock does not by itself suffice though:
# When a node is locked, any update must first validate that it is still the
# first node after locking it, and retry if not. Because new nodes are always
# appended to lists, once a node is first in a bin, it remains first until
# deleted or the bin becomes invalidated (upon resizing). However, operations
# that only conditionally update may inspect nodes until the point of update.
# This is a converse of sorts to the lazy locking technique described by
# Herlihy & Shavit.
#
# The main disadvantage of per-bin locks is that other update operations on
# other nodes in a bin list protected by the same lock can stall, for example
# when user +eql?+ or mapping functions take a long time. However,
# statistically, under random hash codes, this is not a common problem.
# Ideally, the frequency of nodes in bins follows a Poisson distribution
# (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of
# about 0.5 on average, given the resizing threshold of 0.75, although with a
# large variance because of resizing granularity. Ignoring variance, the
# expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
# factorial(k)). The first values are:
#
# - 0: 0.60653066
# - 1: 0.30326533
# - 2: 0.07581633
# - 3: 0.01263606
# - 4: 0.00157952
# - 5: 0.00015795
# - 6: 0.00001316
# - 7: 0.00000094
# - 8: 0.00000006
# - more: less than 1 in ten million
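#
# (These follow from the Poisson pmf exp(-0.5) * 0.5**k / k!; a quick check
# in Ruby: Math.exp(-0.5) * 0.5**2 / 2 #=> ~0.0758, matching the k = 2 row.)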
#
# Lock contention probability for two threads accessing distinct elements is
# roughly 1 / (8 * #elements) under random hashes.
#
# The table is resized when occupancy exceeds a percentage threshold
# (nominally, 0.75, but see below). Only a single thread performs the resize
# (using field +size_control+, to arrange exclusion), but the table otherwise
# remains usable for reads and updates. Resizing proceeds by transferring
# bins, one by one, from the table to the next table. Because we are using
# power-of-two expansion, the elements from each bin must either stay at same
# index, or move with a power of two offset. We eliminate unnecessary node
# creation by catching cases where old nodes can be reused because their next
# fields won't change. On average, only about one-sixth of them need cloning
# when a table doubles. The nodes they replace will be garbage collectable as
# soon as they are no longer referenced by any reader thread that may be in
# the midst of concurrently traversing table. Upon transfer, the old table bin
# contains only a special forwarding node (with hash field +MOVED+) that
# contains the next table as its key. On encountering a forwarding node,
# access and update operations restart, using the new table.
#
# Each bin transfer requires its bin lock. However, unlike other cases, a
# transfer can skip a bin if it fails to acquire its lock, and revisit it
# later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that
# have been skipped because of failure to acquire a lock, and blocks only if
# none are available (i.e., only very rarely). The transfer operation must
# also ensure that all accessible bins in both the old and new table are
# usable by any traversal. When there are no lock acquisition failures, this
# is arranged simply by proceeding from the last bin (+table.size - 1+) up
# towards the first. Upon seeing a forwarding node, traversals arrange to move
# to the new table without revisiting nodes. However, when any node is skipped
# during a transfer, all earlier table bins may have become visible, so are
# initialized with a reverse-forwarding node back to the old table until the
# new ones are established. (This sometimes requires transiently locking a
# forwarding node, which is possible under the above encoding.) These more
# expensive mechanics trigger only when necessary.
#
# The traversal scheme also applies to partial traversals of
# ranges of bins (via an alternate Traverser constructor)
# to support partitioned aggregate operations. Also, read-only
# operations give up if ever forwarded to a null table, which
# provides support for shutdown-style clearing (also not
# currently implemented).
#
# Lazy table initialization minimizes footprint until first use.
#
# The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+,
# which avoids contention on updates but can encounter cache thrashing
# if read too frequently during concurrent access. To avoid reading so
# often, resizing is attempted either when a bin lock is
# contended, or upon adding to a bin already holding two or more
# nodes (checked before adding in the +x_if_absent+ methods, after
# adding in others). Under uniform hash distributions, the
# probability of this occurring at threshold is around 13%,
# meaning that only about 1 in 8 puts check threshold (and after
# resizing, many fewer do so). But this approximation has high
# variance for small table sizes, so we check on any collision
# for sizes <= 64. The bulk putAll operation further reduces
# contention by only committing count updates upon these size
# checks.
#
# @!visibility private
class AtomicReferenceMapBackend
# @!visibility private
class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple
def cas_new_node(i, hash, key, value)
cas(i, nil, Node.new(hash, key, value))
end
def try_to_cas_in_computed(i, hash, key)
succeeded = false
new_value = nil
new_node = Node.new(locked_hash = hash | LOCKED, key, NULL)
if cas(i, nil, new_node)
begin
if NULL == (new_value = yield(NULL))
was_null = true
else
new_node.value = new_value
end
succeeded = true
ensure
volatile_set(i, nil) if !succeeded || was_null
new_node.unlock_via_hash(locked_hash, hash)
end
end
return succeeded, new_value
end
def try_lock_via_hash(i, node, node_hash)
node.try_lock_via_hash(node_hash) do
yield if volatile_get(i) == node
end
end
def delete_node_at(i, node, predecessor_node)
if predecessor_node
predecessor_node.next = node.next
else
volatile_set(i, node.next)
end
end
end
# Key-value entry. Nodes with a hash field of +MOVED+ are special, and do
# not contain user keys or values. Otherwise, keys are never +nil+, and
# +NULL+ +value+ fields indicate that a node is in the process of being
# deleted or created. For purposes of read-only access, a key may be read
# before a value, but can only be used after checking value to be +!= NULL+.
#
# @!visibility private
class Node
extend Concurrent::ThreadSafe::Util::Volatile
attr_volatile :hash, :value, :next
include Concurrent::ThreadSafe::Util::CheapLockable
bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves
# Encodings for special uses of Node hash fields. See above for explanation.
MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes
LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit
WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together
HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash
SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0
attr_reader :key
def initialize(hash, key, value, next_node = nil)
super()
@key = key
self.lazy_set_hash(hash)
self.lazy_set_value(value)
self.next = next_node
end
# Spins a while if +LOCKED+ bit set and this node is the first of its bin,
# and then sets +WAITING+ bits on hash field and blocks (once) if they are
# still set. It is OK for this method to return even if lock is not
# available upon exit, which enables these simple single-wait mechanics.
#
# The corresponding signalling operation is performed within callers: Upon
# detecting that +WAITING+ has been set when unlocking lock (via a failed
# CAS from non-waiting +LOCKED+ state), unlockers acquire the
# +cheap_synchronize+ lock and perform a +cheap_broadcast+.
def try_await_lock(table, i)
if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking?
spins = SPIN_LOCK_ATTEMPTS
randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get
while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash)
if spins >= 0
if (randomizer = (randomizer >> 1)).even? # spin at random
if (spins -= 1) == 0
Thread.pass # yield before blocking
else
randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero?
end
end
elsif cas_hash(my_hash, my_hash | WAITING)
force_acquire_lock(table, i)
break
end
end
end
end
def key?(key)
@key.eql?(key)
end
def matches?(key, hash)
pure_hash == hash && key?(key)
end
def pure_hash
hash & HASH_BITS
end
def try_lock_via_hash(node_hash = hash)
if cas_hash(node_hash, locked_hash = node_hash | LOCKED)
begin
yield
ensure
unlock_via_hash(locked_hash, node_hash)
end
end
end
def locked?
self.class.locked_hash?(hash)
end
def unlock_via_hash(locked_hash, node_hash)
unless cas_hash(locked_hash, node_hash)
self.hash = node_hash
cheap_synchronize { cheap_broadcast }
end
end
private
def force_acquire_lock(table, i)
cheap_synchronize do
if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING
cheap_wait
else
cheap_broadcast # possibly won race vs signaller
end
end
end
class << self
def locked_hash?(hash)
(hash & LOCKED) != 0
end
end
end
# shorthands
MOVED = Node::MOVED
LOCKED = Node::LOCKED
WAITING = Node::WAITING
HASH_BITS = Node::HASH_BITS
NOW_RESIZING = -1
DEFAULT_CAPACITY = 16
MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT
# The buffer size for skipped bins during transfers. The
# value is arbitrary but should be large enough to avoid
# most locking stalls during resizes.
TRANSFER_BUFFER_SIZE = 32
extend Concurrent::ThreadSafe::Util::Volatile
attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two.
# Table initialization and resizing control. When negative, the
# table is being initialized or resized. Otherwise, when table is
# null, holds the initial table size to use upon creation, or 0
# for default. After initialization, holds the next element count
# value upon which to resize the table.
:size_control
def initialize(options = nil)
super()
@counter = Concurrent::ThreadSafe::Util::Adder.new
initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY
self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? MAX_CAPACITY : capacity
end
def get_or_default(key, else_value = nil)
hash = key_hash(key)
current_table = table
while current_table
node = current_table.volatile_get_by_hash(hash)
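# if the scan hits a forwarding node, `break node.key` makes the while
# expression return the next table, restarting the scan there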
current_table =
while node
if (node_hash = node.hash) == MOVED
break node.key
elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value)
return value
end
node = node.next
end
end
else_value
end
def [](key)
get_or_default(key)
end
def key?(key)
get_or_default(key, NULL) != NULL
end
def []=(key, value)
get_and_set(key, value)
value
end
def compute_if_absent(key)
hash = key_hash(key)
current_table = table || initialize_table
while true
if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield }
if succeeded
increment_size
return new_value
end
elsif (node_hash = node.hash) == MOVED
current_table = node.key
elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS))
return current_value
elsif Node.locked_hash?(node_hash)
try_await_lock(current_table, i, node)
else
succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield }
return value if succeeded
end
end
end
def compute_if_present(key)
new_value = nil
internal_replace(key) do |old_value|
if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
NULL
else
new_value
end
end
new_value
end
def compute(key)
internal_compute(key) do |old_value|
if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
NULL
else
new_value
end
end
end
def merge_pair(key, value)
internal_compute(key) do |old_value|
if NULL == old_value || !(value = yield(old_value)).nil?
value
else
NULL
end
end
end
def replace_pair(key, old_value, new_value)
NULL != internal_replace(key, old_value) { new_value }
end
def replace_if_exists(key, new_value)
if (result = internal_replace(key) { new_value }) && NULL != result
result
end
end
def get_and_set(key, value) # internalPut in the original CHMV8
hash = key_hash(key)
current_table = table || initialize_table
while true
if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
if current_table.cas_new_node(i, hash, key, value)
increment_size
break
end
elsif (node_hash = node.hash) == MOVED
current_table = node.key
elsif Node.locked_hash?(node_hash)
try_await_lock(current_table, i, node)
else
succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
break old_value if succeeded
end
end
end
def delete(key)
replace_if_exists(key, NULL)
end
def delete_pair(key, value)
result = internal_replace(key, value) { NULL }
if result && NULL != result
!!result
else
false
end
end
def each_pair
return self unless current_table = table
current_table_size = base_size = current_table.size
i = base_index = 0
while base_index < base_size
if node = current_table.volatile_get(i)
if node.hash == MOVED
current_table = node.key
current_table_size = current_table.size
else
begin
if NULL != (value = node.value) # skip deleted or special nodes
yield node.key, value
end
end while node = node.next
end
end
if (i_with_base = i + base_size) < current_table_size
i = i_with_base # visit upper slots if present
else
i = base_index += 1
end
end
self
end
def size
(sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values
end
def empty?
size == 0
end
# Implementation for clear. Steps through each bin, removing all nodes.
def clear
return self unless current_table = table
current_table_size = current_table.size
deleted_count = i = 0
while i < current_table_size
if !(node = current_table.volatile_get(i))
i += 1
elsif (node_hash = node.hash) == MOVED
current_table = node.key
current_table_size = current_table.size
elsif Node.locked_hash?(node_hash)
decrement_size(deleted_count) # opportunistically update count
deleted_count = 0
node.try_await_lock(current_table, i)
else
current_table.try_lock_via_hash(i, node, node_hash) do
begin
deleted_count += 1 if NULL != node.value # recheck under lock
node.value = nil
end while node = node.next
current_table.volatile_set(i, nil)
i += 1
end
end
end
decrement_size(deleted_count)
self
end
private
# Internal versions of the insertion methods, each a
# little more complicated than the last. All have
# the same basic structure:
# 1. If table uninitialized, create
# 2. If bin empty, try to CAS new node
# 3. If bin stale, use new table
# 4. Lock and validate; if valid, scan and add or update
#
# The others interweave other checks and/or alternative actions:
# * Plain +get_and_set+ checks for and performs resize after insertion.
# * compute_if_absent prescans for mapping without lock (and fails to add
# if present), which also makes pre-emptive resize checks worthwhile.
#
# Someday when details settle down a bit more, it might be worth
# some factoring to reduce sprawl.
def internal_replace(key, expected_old_value = NULL, &block)
hash = key_hash(key)
current_table = table
while current_table
if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
break
elsif (node_hash = node.hash) == MOVED
current_table = node.key
elsif (node_hash & HASH_BITS) != hash && !node.next # precheck
break # rules out possible existence
elsif Node.locked_hash?(node_hash)
try_await_lock(current_table, i, node)
else
succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block)
return old_value if succeeded
end
end
NULL
end
def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash)
current_table.try_lock_via_hash(i, node, node_hash) do
predecessor_node = nil
old_value = NULL
begin
if node.matches?(key, hash) && NULL != (current_value = node.value)
if NULL == expected_old_value || expected_old_value == current_value # a NULL expected_old_value matches any current value
old_value = current_value
if NULL == (node.value = yield(old_value))
current_table.delete_node_at(i, node, predecessor_node)
decrement_size
end
end
break
end
predecessor_node = node
end while node = node.next
return true, old_value
end
end
def find_value_in_node_list(node, key, hash, pure_hash)
do_check_for_resize = false
while true
if pure_hash == hash && node.key?(key) && NULL != (value = node.value)
return value
elsif node = node.next
do_check_for_resize = true # at least 2 nodes -> check for resize
pure_hash = node.pure_hash
else
return NULL
end
end
ensure
check_for_resize if do_check_for_resize
end
def internal_compute(key, &block)
hash = key_hash(key)
current_table = table || initialize_table
while true
if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block)
if succeeded
if NULL == new_value
break nil
else
increment_size
break new_value
end
end
elsif (node_hash = node.hash) == MOVED
current_table = node.key
elsif Node.locked_hash?(node_hash)
try_await_lock(current_table, i, node)
else
succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block)
break new_value if succeeded
end
end
end
def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash)
added = false
current_table.try_lock_via_hash(i, node, node_hash) do
while true
if node.matches?(key, hash) && NULL != (value = node.value)
return true, value
end
last = node
unless node = node.next
last.next = Node.new(hash, key, value = yield)
added = true
increment_size
return true, value
end
end
end
ensure
check_for_resize if added
end
def attempt_compute(key, hash, current_table, i, node, node_hash)
added = false
current_table.try_lock_via_hash(i, node, node_hash) do
predecessor_node = nil
while true
if node.matches?(key, hash) && NULL != (value = node.value)
if NULL == (node.value = value = yield(value))
current_table.delete_node_at(i, node, predecessor_node)
decrement_size
value = nil
end
return true, value
end
predecessor_node = node
unless node = node.next
if NULL == (value = yield(NULL))
value = nil
else
predecessor_node.next = Node.new(hash, key, value)
added = true
increment_size
end
return true, value
end
end
end
ensure
check_for_resize if added
end
def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
node_nesting = nil
current_table.try_lock_via_hash(i, node, node_hash) do
node_nesting = 1
old_value = nil
found_old_value = false
while node
if node.matches?(key, hash) && NULL != (old_value = node.value)
found_old_value = true
node.value = value
break
end
last = node
unless node = node.next
last.next = Node.new(hash, key, value)
break
end
node_nesting += 1
end
return true, old_value if found_old_value
increment_size
true
end
ensure
check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64)
end
def initialize_copy(other)
super
@counter = Concurrent::ThreadSafe::Util::Adder.new
self.table = nil
self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY
self
end
def try_await_lock(current_table, i, node)
check_for_resize # try resizing if can't get lock
node.try_await_lock(current_table, i)
end
def key_hash(key)
key.hash & HASH_BITS
end
# Returns a power of two table size for the given desired capacity.
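# (For example, table_size_for(17) #=> 32: the loop doubles 2 until it
# reaches or exceeds the requested capacity.)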
def table_size_for(entry_count)
size = 2
size <<= 1 while size < entry_count
size
end
# Initializes table, using the size recorded in +size_control+.
def initialize_table
until current_table ||= table
if (size_ctrl = size_control) == NOW_RESIZING
Thread.pass # lost initialization race; just spin
else
try_in_resize_lock(current_table, size_ctrl) do
initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY
current_table = self.table = Table.new(initial_size)
initial_size - (initial_size >> 2) # 75% load factor
end
end
end
current_table
end
# If table is too small and not already resizing, creates next table and
# transfers bins. Rechecks occupancy after a transfer to see if another
# resize is already needed because resizings are lagging additions.
def check_for_resize
while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum
try_in_resize_lock(current_table, size_ctrl) do
self.table = rebuild(current_table)
(table_size << 1) - (table_size >> 1) # 75% load factor
end
end
end
def try_in_resize_lock(current_table, size_ctrl)
if cas_size_control(size_ctrl, NOW_RESIZING)
begin
if current_table == table # recheck under lock
size_ctrl = yield # get new size_control
end
ensure
self.size_control = size_ctrl
end
end
end
# Moves and/or copies the nodes in each bin to new table. See above for explanation.
def rebuild(table)
old_table_size = table.size
new_table = table.next_in_size_table
# puts "#{old_table_size} -> #{new_table.size}"
forwarder = Node.new(MOVED, new_table, NULL)
rev_forwarder = nil
locked_indexes = nil # holds bins to revisit; nil until needed
locked_arr_idx = 0
bin = old_table_size - 1
i = bin
while true
if !(node = table.volatile_get(i))
# no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table
redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder))
elsif Node.locked_hash?(node_hash = node.hash)
locked_indexes ||= ::Array.new
if bin < 0 && locked_arr_idx > 0
locked_arr_idx -= 1
i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin
redo
end
if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE
node.try_await_lock(table, i) # no other options -- block
redo
end
rev_forwarder ||= Node.new(MOVED, table, NULL)
redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list
locked_indexes << i
new_table.volatile_set(i, rev_forwarder)
new_table.volatile_set(i + old_table_size, rev_forwarder)
else
redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder)
end
if bin > 0
i = (bin -= 1)
elsif locked_indexes && !locked_indexes.empty?
bin = -1
i = locked_indexes.pop
locked_arr_idx = locked_indexes.size - 1
else
return new_table
end
end
end
def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder)
# transiently use a locked forwarding node
locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL)
if old_table.cas(i, nil, locked_forwarder)
new_table.volatile_set(i, nil) # kill the potential reverse forwarders
new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders
old_table.volatile_set(i, forwarder)
locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED)
true
end
end
# Splits a normal bin with list headed by e into lo and hi parts; installs in given table.
def split_old_bin(table, new_table, i, node, node_hash, forwarder)
table.try_lock_via_hash(i, node, node_hash) do
split_bin(new_table, i, node, node_hash)
table.volatile_set(i, forwarder)
end
end
def split_bin(new_table, i, node, node_hash)
bit = new_table.size >> 1 # bit to split on
run_bit = node_hash & bit
last_run = nil
low = nil
high = nil
current_node = node
# this optimises for the lowest amount of volatile writes and objects created
while current_node = current_node.next
unless (b = current_node.hash & bit) == run_bit
run_bit = b
last_run = current_node
end
end
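# last_run now heads the longest trailing run of nodes that all land in the
# same half of the new table; that run is reused as-is instead of being cloned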
if run_bit == 0
low = last_run
else
high = last_run
end
current_node = node
until current_node == last_run
pure_hash = current_node.pure_hash
if (pure_hash & bit) == 0
low = Node.new(pure_hash, current_node.key, current_node.value, low)
else
high = Node.new(pure_hash, current_node.key, current_node.value, high)
end
current_node = current_node.next
end
new_table.volatile_set(i, low)
new_table.volatile_set(i + bit, high)
end
def increment_size
@counter.increment
end
def decrement_size(by = 1)
@counter.add(-by)
end
end
end
end

View File

@ -10,7 +10,7 @@ module Concurrent
# WARNING: all public methods of the class must operate on the @backend
# directly without calling each other. This is important because of the
# SynchronizedMapBackend which uses a non-reentrant mutex for perfomance
# SynchronizedMapBackend which uses a non-reentrant mutex for performance
# reasons.
def initialize(options = nil)
@backend = {}
@ -95,7 +95,6 @@ module Concurrent
end
def each_pair
return enum_for :each_pair unless block_given?
dupped_backend.each_pair do |k, v|
yield k, v
end

View File

@ -0,0 +1,82 @@
require 'concurrent/collection/map/non_concurrent_map_backend'
module Concurrent
# @!visibility private
module Collection
# @!visibility private
class SynchronizedMapBackend < NonConcurrentMapBackend
require 'mutex_m'
include Mutex_m
# WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are
# not allowed to call each other.
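# For example (key2 is illustrative): compute_if_absent(key) { self[key2] }
# would deadlock, because the block runs inside synchronize and #[] would
# try to re-acquire the same non-reentrant lock.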
def [](key)
synchronize { super }
end
def []=(key, value)
synchronize { super }
end
def compute_if_absent(key)
synchronize { super }
end
def compute_if_present(key)
synchronize { super }
end
def compute(key)
synchronize { super }
end
def merge_pair(key, value)
synchronize { super }
end
def replace_pair(key, old_value, new_value)
synchronize { super }
end
def replace_if_exists(key, new_value)
synchronize { super }
end
def get_and_set(key, value)
synchronize { super }
end
def key?(key)
synchronize { super }
end
def delete(key)
synchronize { super }
end
def delete_pair(key, value)
synchronize { super }
end
def clear
synchronize { super }
end
def size
synchronize { super }
end
def get_or_default(key, default_value)
synchronize { super }
end
private
def dupped_backend
synchronize { super }
end
end
end
end

View File

@ -0,0 +1,143 @@
require 'concurrent/collection/java_non_concurrent_priority_queue'
require 'concurrent/collection/ruby_non_concurrent_priority_queue'
require 'concurrent/utility/engine'
module Concurrent
module Collection
# @!visibility private
# @!macro internal_implementation_note
NonConcurrentPriorityQueueImplementation = case
when Concurrent.on_jruby?
JavaNonConcurrentPriorityQueue
else
RubyNonConcurrentPriorityQueue
end
private_constant :NonConcurrentPriorityQueueImplementation
# @!macro priority_queue
#
# A queue collection in which the elements are sorted based on their
# comparison (spaceship) operator `<=>`. Items are added to the queue
# at a position relative to their priority. On removal the element
# with the "highest" priority is removed. By default the sort order is
# from highest to lowest, but a lowest-to-highest sort order can be
# set on construction.
#
# The API is based on the `Queue` class from the Ruby standard library.
#
# The pure Ruby implementation, `RubyNonConcurrentPriorityQueue` uses a heap algorithm
# stored in an array. The algorithm is based on the work of Robert Sedgewick
# and Kevin Wayne.
#
# The JRuby native implementation is a thin wrapper around the standard
# library `java.util.PriorityQueue`.
#
# When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`.
# When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`.
#
# @note This implementation is *not* thread safe.
#
# @see http://en.wikipedia.org/wiki/Priority_queue
# @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html
#
# @see http://algs4.cs.princeton.edu/24pq/index.php#2.6
# @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html
#
# @!visibility private
class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation
alias_method :has_priority?, :include?
alias_method :size, :length
alias_method :deq, :pop
alias_method :shift, :pop
alias_method :<<, :push
alias_method :enq, :push
# @!method initialize(opts = {})
# @!macro priority_queue_method_initialize
#
# Create a new priority queue with no items.
#
# @param [Hash] opts the options for creating the queue
# @option opts [Symbol] :order (:max) dictates the order in which items are
# stored: from highest to lowest when `:max` or `:high`; from lowest to
# highest when `:min` or `:low`
# @!method clear
# @!macro priority_queue_method_clear
#
# Removes all of the elements from this priority queue.
# @!method delete(item)
# @!macro priority_queue_method_delete
#
# Deletes all items from `self` that are equal to `item`.
#
# @param [Object] item the item to be removed from the queue
# @return [Boolean] true if the item is found else false
# @!method empty?
# @!macro priority_queue_method_empty
#
# Returns `true` if `self` contains no elements.
#
# @return [Boolean] true if there are no items in the queue else false
# @!method include?(item)
# @!macro priority_queue_method_include
#
# Returns `true` if the given item is present in `self` (that is, if any
# element == `item`), otherwise returns false.
#
# @param [Object] item the item to search for
#
# @return [Boolean] true if the item is found else false
# @!method length
# @!macro priority_queue_method_length
#
# The current length of the queue.
#
# @return [Fixnum] the number of items in the queue
# @!method peek
# @!macro priority_queue_method_peek
#
# Retrieves, but does not remove, the head of this queue, or returns `nil`
# if this queue is empty.
#
# @return [Object] the head of the queue or `nil` when empty
# @!method pop
# @!macro priority_queue_method_pop
#
# Retrieves and removes the head of this queue, or returns `nil` if this
# queue is empty.
#
# @return [Object] the head of the queue or `nil` when empty
# @!method push(item)
# @!macro priority_queue_method_push
#
# Inserts the specified element into this priority queue.
#
# @param [Object] item the item to insert onto the queue
# @!method self.from_list(list, opts = {})
# @!macro priority_queue_method_from_list
#
# Create a new priority queue from the given list.
#
# @param [Enumerable] list the list to build the queue from
# @param [Hash] opts the options for creating the queue
#
# @return [NonConcurrentPriorityQueue] the newly created and populated queue
end
end
end
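# A minimal usage sketch of the API documented above (illustrative only;
# note the class is marked @!visibility private, i.e. an internal API):
require 'concurrent/collection/non_concurrent_priority_queue'

queue = Concurrent::Collection::NonConcurrentPriorityQueue.new
[3, 1, 2].each { |n| queue << n }
queue.pop  #=> 3 (highest priority first by default)
queue.peek #=> 2 (inspect without removing)

min_queue = Concurrent::Collection::NonConcurrentPriorityQueue.new(order: :min)
[3, 1, 2].each { |n| min_queue << n }
min_queue.pop #=> 1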

View File

@ -0,0 +1,150 @@
module Concurrent
module Collection
# @!macro priority_queue
#
# @!visibility private
# @!macro internal_implementation_note
class RubyNonConcurrentPriorityQueue
# @!macro priority_queue_method_initialize
def initialize(opts = {})
order = opts.fetch(:order, :max)
@comparator = [:min, :low].include?(order) ? -1 : 1
clear
end
# @!macro priority_queue_method_clear
def clear
@queue = [nil]
@length = 0
true
end
# @!macro priority_queue_method_delete
def delete(item)
return false if empty?
original_length = @length
k = 1
while k <= @length
if @queue[k] == item
swap(k, @length)
@length -= 1
sink(k)
@queue.pop
else
k += 1
end
end
@length != original_length
end
# @!macro priority_queue_method_empty
def empty?
size == 0
end
# @!macro priority_queue_method_include
def include?(item)
@queue.include?(item)
end
alias_method :has_priority?, :include?
# @!macro priority_queue_method_length
def length
@length
end
alias_method :size, :length
# @!macro priority_queue_method_peek
def peek
empty? ? nil : @queue[1]
end
# @!macro priority_queue_method_pop
def pop
return nil if empty?
max = @queue[1]
swap(1, @length)
@length -= 1
sink(1)
@queue.pop
max
end
alias_method :deq, :pop
alias_method :shift, :pop
# @!macro priority_queue_method_push
def push(item)
raise ArgumentError.new('cannot enqueue nil') if item.nil?
@length += 1
@queue << item
swim(@length)
true
end
alias_method :<<, :push
alias_method :enq, :push
# @!macro priority_queue_method_from_list
def self.from_list(list, opts = {})
queue = new(opts)
list.each{|item| queue << item }
queue
end
private
# Exchange the values at the given indexes within the internal array.
#
# @param [Integer] x the first index to swap
# @param [Integer] y the second index to swap
#
# @!visibility private
def swap(x, y)
temp = @queue[x]
@queue[x] = @queue[y]
@queue[y] = temp
end
# Are the items at the given indexes ordered based on the priority
# order specified at construction?
#
# @param [Integer] x the first index from which to retrieve a comparable value
# @param [Integer] y the second index from which to retrieve a comparable value
#
# @return [Boolean] true if the two elements are in the correct priority order
# else false
#
# @!visibility private
def ordered?(x, y)
(@queue[x] <=> @queue[y]) == @comparator
end
# Percolate down to maintain heap invariant.
#
# @param [Integer] k the index at which to start the percolation
#
# @!visibility private
def sink(k)
while (j = (2 * k)) <= @length do
j += 1 if j < @length && ! ordered?(j, j+1)
break if ordered?(k, j)
swap(k, j)
k = j
end
end
# Percolate up to maintain heap invariant.
#
# @param [Integer] k the index at which to start the percolation
#
# @!visibility private
def swim(k)
while k > 1 && ! ordered?(k/2, k) do
swap(k, k/2)
k = k/2
end
end
end
end
end
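# A short sketch of the heap in action (illustrative only): `from_list`
# builds a populated queue, and repeated `pop` drains it in priority order,
# with `swim`/`sink` maintaining the heap invariant after each mutation.
require 'concurrent/collection/ruby_non_concurrent_priority_queue'

queue = Concurrent::Collection::RubyNonConcurrentPriorityQueue.from_list([2, 7, 1, 5])
drained = []
drained << queue.pop until queue.empty?
drained #=> [7, 5, 2, 1]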

View File

@ -0,0 +1,34 @@
require 'concurrent/concern/logging'
module Concurrent
module Concern
# @!visibility private
# @!macro internal_implementation_note
module Deprecation
# TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed.
include Concern::Logging
def deprecated(message, strip = 2)
caller_line = caller(strip).first if strip > 0
klass = if Module === self
self
else
self.class
end
message = if strip > 0
format("[DEPRECATED] %s\ncalled on: %s", message, caller_line)
else
format('[DEPRECATED] %s', message)
end
log WARN, klass.to_s, message
end
def deprecated_method(old_name, new_name)
deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3
end
extend self
end
end
end
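# A hedged usage sketch (HypotheticalWidget and its method names are
# invented for illustration): extending the module gives a class the
# `deprecated_method` helper, which logs through the global logger.
require 'concurrent'

class HypotheticalWidget
  extend Concurrent::Concern::Deprecation

  def self.old_api
    deprecated_method 'old_api', 'new_api' # logs "[DEPRECATED] `old_api` ..."
    new_api
  end

  def self.new_api
    :ok
  end
end

HypotheticalWidget.old_api #=> :ok, after emitting the deprecation warning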

View File

@ -0,0 +1,73 @@
module Concurrent
module Concern
# Object references in Ruby are mutable. This can lead to serious problems when
# the `#value` of a concurrent object is a mutable reference, which is always the
# case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type.
# Most classes in this library that expose a `#value` getter method do so using the
# `Dereferenceable` mixin module.
#
# @!macro copy_options
module Dereferenceable
# NOTE: This module is going away in 2.0. In the mean time we need it to
# play nicely with the synchronization layer. This means that the
# including class SHOULD be synchronized and it MUST implement a
# `#synchronize` method. Not doing so will lead to runtime errors.
# Return the value this object represents after applying the options specified
# by the `#set_deref_options` method.
#
# @return [Object] the current value of the object
def value
synchronize { apply_deref_options(@value) }
end
alias_method :deref, :value
protected
# Set the internal value of this object
#
# @param [Object] value the new value
def value=(value)
synchronize{ @value = value }
end
# @!macro dereferenceable_set_deref_options
# Set the options which define the operations #value performs before
# returning data to the caller (dereferencing).
#
# @note Most classes that include this module will call `#set_deref_options`
# from within the constructor, thus allowing these options to be set at
# object creation.
#
# @param [Hash] opts the options defining dereference behavior.
# @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
# @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
# @option opts [String] :copy_on_deref (nil) call the given `Proc` passing
# the internal value and returning the value returned from the proc
def set_deref_options(opts = {})
synchronize{ ns_set_deref_options(opts) }
end
# @!macro dereferenceable_set_deref_options
# @!visibility private
def ns_set_deref_options(opts)
@dup_on_deref = opts[:dup_on_deref] || opts[:dup]
@freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze]
@copy_on_deref = opts[:copy_on_deref] || opts[:copy]
@do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref)
nil
end
# @!visibility private
def apply_deref_options(value)
return nil if value.nil?
return value if @do_nothing_on_deref
value = @copy_on_deref.call(value) if @copy_on_deref
value = value.dup if @dup_on_deref
value = value.freeze if @freeze_on_deref
value
end
end
end
end
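# A hedged sketch of an including class (HypotheticalBox is invented for
# illustration). The mixin only requires a `#synchronize` method; here a
# plain Mutex provides it, and `freeze_on_deref` freezes the reference
# before `#value` hands it out.
require 'concurrent'

class HypotheticalBox
  include Concurrent::Concern::Dereferenceable

  def initialize(value, opts = {})
    @mutex = Mutex.new
    synchronize { @value = value }
    set_deref_options(opts)
  end

  private

  def synchronize(&block)
    @mutex.synchronize(&block)
  end
end

box = HypotheticalBox.new([1, 2, 3], freeze_on_deref: true)
box.value.frozen? #=> true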

View File

@ -0,0 +1,32 @@
require 'logger'
module Concurrent
module Concern
# Include where logging is needed
#
# @!visibility private
module Logging
include Logger::Severity
# Logs through {Concurrent.global_logger}; it can be overridden by setting @logger
# @param [Integer] level one of Logger::Severity constants
# @param [String] progname e.g. a path of an Actor
# @param [String, nil] message when nil block is used to generate the message
# @yieldreturn [String] a message
def log(level, progname, message = nil, &block)
#NOTE: Cannot require 'concurrent/configuration' above due to circular references.
# Assume that the gem has been initialized if we've gotten this far.
logger = if defined?(@logger) && @logger
@logger
else
Concurrent.global_logger
end
logger.call level, progname, message, &block
rescue => error
$stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" +
"#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}"
end
end
end
end
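# A hedged usage sketch (HypotheticalWorker is invented for illustration):
# including the module provides `log`, which routes through
# Concurrent.global_logger unless the object defines its own @logger.
require 'concurrent'

class HypotheticalWorker
  include Concurrent::Concern::Logging

  def run
    log WARN, self.class.name, 'starting work'
    :done
  end
end

HypotheticalWorker.new.run # prints a WARN line via the default logger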

View File

@ -0,0 +1,220 @@
require 'thread'
require 'timeout'
require 'concurrent/atomic/event'
require 'concurrent/concern/dereferenceable'
module Concurrent
module Concern
module Obligation
include Concern::Dereferenceable
# NOTE: The Dereferenceable module is going away in 2.0. In the mean time
# we need it to play nicely with the synchronization layer. This means
# that the including class SHOULD be synchronized and it MUST implement a
# `#synchronize` method. Not doing so will lead to runtime errors.
# Has the obligation been fulfilled?
#
# @return [Boolean]
def fulfilled?
state == :fulfilled
end
alias_method :realized?, :fulfilled?
# Has the obligation been rejected?
#
# @return [Boolean]
def rejected?
state == :rejected
end
# Is obligation completion still pending?
#
# @return [Boolean]
def pending?
state == :pending
end
# Is the obligation still unscheduled?
#
# @return [Boolean]
def unscheduled?
state == :unscheduled
end
# Has the obligation completed processing?
#
# @return [Boolean]
def complete?
[:fulfilled, :rejected].include? state
end
# Is the obligation still awaiting completion of processing?
#
# @return [Boolean]
def incomplete?
! complete?
end
# The current value of the obligation. Will be `nil` while the state is
# pending or the operation has been rejected.
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Object] see Dereferenceable#deref
def value(timeout = nil)
wait timeout
deref
end
# Wait until obligation is complete or the timeout has been reached.
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Obligation] self
def wait(timeout = nil)
event.wait(timeout) if timeout != 0 && incomplete?
self
end
# Wait until obligation is complete or the timeout is reached. Will re-raise
# any exceptions raised during processing (but will not raise an exception
# on timeout).
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Obligation] self
# @raise [Exception] raises the reason when rejected
def wait!(timeout = nil)
wait(timeout).tap { raise self if rejected? }
end
alias_method :no_error!, :wait!
# The current value of the obligation. Will be `nil` while the state is
# pending or the operation has been rejected. Will re-raise any exceptions
# raised during processing (but will not raise an exception on timeout).
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Object] see Dereferenceable#deref
# @raise [Exception] raises the reason when rejected
def value!(timeout = nil)
wait(timeout)
if rejected?
raise self
else
deref
end
end
# The current state of the obligation.
#
# @return [Symbol] the current state
def state
synchronize { @state }
end
# If an exception was raised during processing this will return the
# exception object. Will return `nil` when the state is pending or if
# the obligation has been successfully fulfilled.
#
# @return [Exception] the exception raised during processing or `nil`
def reason
synchronize { @reason }
end
# @example allows a rejected Obligation to be raised
# rejected_ivar = IVar.new.fail
# raise rejected_ivar
def exception(*args)
raise 'obligation is not rejected' unless rejected?
reason.exception(*args)
end
protected
# @!visibility private
def get_arguments_from(opts = {})
[*opts.fetch(:args, [])]
end
# @!visibility private
def init_obligation
@event = Event.new
@value = @reason = nil
end
# @!visibility private
def event
@event
end
# @!visibility private
def set_state(success, value, reason)
if success
@value = value
@state = :fulfilled
else
@reason = reason
@state = :rejected
end
end
# @!visibility private
def state=(value)
synchronize { ns_set_state(value) }
end
# Atomic compare and set operation
# State is set to `next_state` only if `current state == expected_current`.
#
# @param [Symbol] next_state
# @param [Symbol] expected_current
#
# @return [Boolean] true if the state is changed, false otherwise
#
# @!visibility private
def compare_and_set_state(next_state, *expected_current)
synchronize do
if expected_current.include? @state
@state = next_state
true
else
false
end
end
end
# Executes the block within the mutex if the current state is included in expected_states
#
# @return block value if executed, false otherwise
#
# @!visibility private
def if_state(*expected_states)
synchronize do
raise ArgumentError.new('no block given') unless block_given?
if expected_states.include? @state
yield
else
false
end
end
end
protected
# Is the current state the expected state?
#
# @param [Symbol] expected The state to check against
# @return [Boolean] true if in the expected state else false
#
# @!visibility private
def ns_check_state?(expected)
@state == expected
end
# @!visibility private
def ns_set_state(value)
@state = value
end
end
end
end
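# A hedged sketch of the state machine above (HypotheticalResult is
# invented for illustration). The including class must be synchronized,
# so it inherits from Synchronization::LockableObject as the NOTE requires.
require 'concurrent'

class HypotheticalResult < Concurrent::Synchronization::LockableObject
  include Concurrent::Concern::Obligation

  def initialize
    super
    synchronize do
      init_obligation
      ns_set_deref_options({})
      ns_set_state(:pending)
    end
  end

  def fulfill(value)
    synchronize { set_state(true, value, nil) }
    event.set # wake any threads blocked in #wait / #value
  end
end

result = HypotheticalResult.new
result.pending?   #=> true
result.fulfill(42)
result.fulfilled? #=> true
result.value      #=> 42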

View File

@ -0,0 +1,110 @@
require 'concurrent/collection/copy_on_notify_observer_set'
require 'concurrent/collection/copy_on_write_observer_set'
module Concurrent
module Concern
# The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one
# of the most useful design patterns.
#
# The workflow is very simple:
# - an `observer` can register itself to a `subject` via a callback
# - many `observers` can be registered to the same `subject`
# - the `subject` notifies all registered observers when its status changes
# - an `observer` can deregister itself when it is no longer interested in receiving
# event notifications
#
# In a single threaded environment the whole pattern is very easy: the
# `subject` can use a simple data structure to manage all its subscribed
# `observer`s and every `observer` can react directly to every event without
# caring about synchronization.
#
# In a multi-threaded environment things are more complex: the `subject` must
# synchronize access to its data structure, and to do so we currently use
# two specialized observer sets: {Concurrent::Concern::CopyOnWriteObserverSet}
# and {Concurrent::Concern::CopyOnNotifyObserverSet}.
#
# When implementing an `observer` there's a very important rule to remember:
# **there are no guarantees about the thread that will execute the callback**
#
# Let's take this example
# ```
# class Observer
# def initialize
# @count = 0
# end
#
# def update
# @count += 1
# end
# end
#
# obs = Observer.new
# [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) }
# # execute [obj1, obj2, obj3, obj4]
# ```
#
# `obs` is wrong because the variable `@count` can be accessed by different
# threads at the same time, so it should be synchronized (using either a Mutex
# or an AtomicFixnum); a corrected, thread-safe version is sketched after this module.
module Observable
# @!macro observable_add_observer
#
# Adds an observer to this set. If a block is passed, the observer will be
# created by this method and no other params should be passed.
#
# @param [Object] observer the observer to add
# @param [Symbol] func the function to call on the observer during notification.
# Default is :update
# @return [Object] the added observer
def add_observer(observer = nil, func = :update, &block)
observers.add_observer(observer, func, &block)
end
# As `#add_observer` but can be used for chaining.
#
# @param [Object] observer the observer to add
# @param [Symbol] func the function to call on the observer during notification.
# @return [Observable] self
def with_observer(observer = nil, func = :update, &block)
add_observer(observer, func, &block)
self
end
# @!macro observable_delete_observer
#
# Remove `observer` as an observer on this object so that it will no
# longer receive notifications.
#
# @param [Object] observer the observer to remove
# @return [Object] the deleted observer
def delete_observer(observer)
observers.delete_observer(observer)
end
# @!macro observable_delete_observers
#
# Remove all observers associated with this object.
#
# @return [Observable] self
def delete_observers
observers.delete_observers
self
end
# @!macro observable_count_observers
#
# Return the number of observers associated with this object.
#
# @return [Integer] the observers count
def count_observers
observers.count_observers
end
protected
attr_accessor :observers
end
end
end
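# A thread-safe rewrite of the observer sketched in the comment above
# (illustrative only): @count is guarded by a Mutex because the callback
# may fire on any thread.
require 'concurrent'

class ThreadSafeObserver
  def initialize
    @mutex = Mutex.new
    @count = 0
  end

  def update(*_args) # observables pass (time, value, reason) on notification
    @mutex.synchronize { @count += 1 }
  end

  def count
    @mutex.synchronize { @count }
  end
end

observer = ThreadSafeObserver.new
ivar = Concurrent::IVar.new # IVar includes Concern::Observable
ivar.add_observer(observer)
ivar.set(:done) # notifies all registered observers
observer.count  #=> 1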

View File

@ -0,0 +1,184 @@
require 'thread'
require 'concurrent/delay'
require 'concurrent/errors'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/concern/logging'
require 'concurrent/executor/immediate_executor'
require 'concurrent/executor/cached_thread_pool'
require 'concurrent/utility/at_exit'
require 'concurrent/utility/processor_counter'
module Concurrent
extend Concern::Logging
autoload :Options, 'concurrent/options'
autoload :TimerSet, 'concurrent/executor/timer_set'
autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor'
# @return [Logger] Logger with provided level and output.
def self.create_simple_logger(level = Logger::FATAL, output = $stderr)
# TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking
lambda do |severity, progname, message = nil, &block|
return false if severity < level
message = block ? block.call : message
formatted_message = case message
when String
message
when Exception
format "%s (%s)\n%s",
message.message, message.class, (message.backtrace || []).join("\n")
else
message.inspect
end
output.print format "[%s] %5s -- %s: %s\n",
Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'),
Logger::SEV_LABEL[severity],
progname,
formatted_message
true
end
end
# Use logger created by #create_simple_logger to log concurrent-ruby messages.
def self.use_simple_logger(level = Logger::FATAL, output = $stderr)
Concurrent.global_logger = create_simple_logger level, output
end
# @return [Logger] Logger with provided level and output.
# @deprecated
def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr)
logger = Logger.new(output)
logger.level = level
logger.formatter = lambda do |severity, datetime, progname, msg|
formatted_message = case msg
when String
msg
when Exception
format "%s (%s)\n%s",
msg.message, msg.class, (msg.backtrace || []).join("\n")
else
msg.inspect
end
format "[%s] %5s -- %s: %s\n",
datetime.strftime('%Y-%m-%d %H:%M:%S.%L'),
severity,
progname,
formatted_message
end
lambda do |loglevel, progname, message = nil, &block|
logger.add loglevel, message, progname, &block
end
end
# Use logger created by #create_stdlib_logger to log concurrent-ruby messages.
# @deprecated
def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr)
Concurrent.global_logger = create_stdlib_logger level, output
end
# TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods
# Suppresses all output when used for logging.
NULL_LOGGER = lambda { |level, progname, message = nil, &block| }
# @!visibility private
GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN))
private_constant :GLOBAL_LOGGER
def self.global_logger
GLOBAL_LOGGER.value
end
def self.global_logger=(value)
GLOBAL_LOGGER.value = value
end
# @!visibility private
GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor(auto_terminate: true) }
private_constant :GLOBAL_FAST_EXECUTOR
# @!visibility private
GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor(auto_terminate: true) }
private_constant :GLOBAL_IO_EXECUTOR
# @!visibility private
GLOBAL_TIMER_SET = Delay.new { TimerSet.new(auto_terminate: true) }
private_constant :GLOBAL_TIMER_SET
# @!visibility private
GLOBAL_IMMEDIATE_EXECUTOR = ImmediateExecutor.new
private_constant :GLOBAL_IMMEDIATE_EXECUTOR
# Disables AtExit handlers including pool auto-termination handlers.
# When disabled it will be the application programmer's responsibility
# to ensure that the handlers are shutdown properly prior to application
# exit by calling {AtExit.run} method.
#
# @note this option should be needed only because of `at_exit` ordering
# issues which may arise when running some of the testing frameworks.
# E.g. Minitest's test-suite runs itself in `at_exit` callback which
# executes after the pools are already terminated. Then auto termination
# needs to be disabled and called manually after test-suite ends.
# @note This method should *never* be called
# from within a gem. It should *only* be used from within the main
# application and even then it should be used only when necessary.
# @see AtExit
def self.disable_at_exit_handlers!
AtExit.enabled = false
end
# Global thread pool optimized for short, fast *operations*.
#
# @return [ThreadPoolExecutor] the thread pool
def self.global_fast_executor
GLOBAL_FAST_EXECUTOR.value
end
# Global thread pool optimized for long, blocking (IO) *tasks*.
#
# @return [ThreadPoolExecutor] the thread pool
def self.global_io_executor
GLOBAL_IO_EXECUTOR.value
end
def self.global_immediate_executor
GLOBAL_IMMEDIATE_EXECUTOR
end
# Global thread pool user for global *timers*.
#
# @return [Concurrent::TimerSet] the thread pool
def self.global_timer_set
GLOBAL_TIMER_SET.value
end
# General access point to global executors.
# @param [Symbol, Executor] executor_identifier symbols:
# - :fast - {Concurrent.global_fast_executor}
# - :io - {Concurrent.global_io_executor}
# - :immediate - {Concurrent.global_immediate_executor}
# @return [Executor]
def self.executor(executor_identifier)
Options.executor(executor_identifier)
end
def self.new_fast_executor(opts = {})
FixedThreadPool.new(
[2, Concurrent.processor_count].max,
auto_terminate: opts.fetch(:auto_terminate, true),
idletime: 60, # 1 minute
max_queue: 0, # unlimited
fallback_policy: :abort # shouldn't matter -- 0 max queue
)
end
def self.new_io_executor(opts = {})
CachedThreadPool.new(
auto_terminate: opts.fetch(:auto_terminate, true),
fallback_policy: :abort # shouldn't matter -- 0 max queue
)
end
end
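# A hedged sketch of the global facilities above: swap in a more verbose
# logger and post a blocking task to the global IO pool.
require 'concurrent'

Concurrent.use_simple_logger(Logger::DEBUG) # gem logging now prints to $stderr

latch = Concurrent::CountDownLatch.new(1)
Concurrent.global_io_executor.post do
  # long or blocking work belongs on the :io pool
  latch.count_down
end
latch.wait(1) #=> true once the posted task has run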

View File

@ -3,6 +3,6 @@ module Concurrent
# Various classes within allow for +nil+ values to be stored,
# so a special +NULL+ token is required to indicate the "nil-ness".
# @!visibility private
NULL = Object.new
NULL = ::Object.new
end

View File

@ -0,0 +1,81 @@
require 'concurrent/future'
require 'concurrent/atomic/atomic_fixnum'
module Concurrent
# @!visibility private
class DependencyCounter # :nodoc:
def initialize(count, &block)
@counter = AtomicFixnum.new(count)
@block = block
end
def update(time, value, reason)
if @counter.decrement == 0
@block.call
end
end
end
# Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available.
# {include:file:docs-source/dataflow.md}
#
# @param [Future] inputs zero or more `Future` operations that this dataflow depends upon
#
# @yield The operation to perform once all the dependencies are met
# @yieldparam [Future] inputs each of the `Future` inputs to the dataflow
# @yieldreturn [Object] the result of the block operation
#
# @return [Object] the result of all the operations
#
# @raise [ArgumentError] if no block is given
# @raise [ArgumentError] if any of the inputs are not `IVar`s
def dataflow(*inputs, &block)
dataflow_with(Concurrent.global_io_executor, *inputs, &block)
end
module_function :dataflow
def dataflow_with(executor, *inputs, &block)
call_dataflow(:value, executor, *inputs, &block)
end
module_function :dataflow_with
def dataflow!(*inputs, &block)
dataflow_with!(Concurrent.global_io_executor, *inputs, &block)
end
module_function :dataflow!
def dataflow_with!(executor, *inputs, &block)
call_dataflow(:value!, executor, *inputs, &block)
end
module_function :dataflow_with!
private
def call_dataflow(method, executor, *inputs, &block)
raise ArgumentError.new('an executor must be provided') if executor.nil?
raise ArgumentError.new('no block given') unless block_given?
unless inputs.all? { |input| input.is_a? IVar }
raise ArgumentError.new("Not all dependencies are IVars.\nDependencies: #{ inputs.inspect }")
end
result = Future.new(executor: executor) do
values = inputs.map { |input| input.send(method) }
block.call(*values)
end
if inputs.empty?
result.execute
else
counter = DependencyCounter.new(inputs.size) { result.execute }
inputs.each do |input|
input.add_observer counter
end
end
result
end
module_function :call_dataflow
end
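# A minimal dataflow sketch: the block is scheduled only after both input
# futures are fulfilled, and its own result is returned as a future.
require 'concurrent'

a = Concurrent::Future.execute { 1 + 1 }
b = Concurrent::Future.execute { 2 + 2 }

sum = Concurrent.dataflow(a, b) { |x, y| x + y }
sum.value #=> 6, blocking until a, b, and the dataflow block complete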

View File

@ -0,0 +1,199 @@
require 'thread'
require 'concurrent/concern/obligation'
require 'concurrent/executor/immediate_executor'
require 'concurrent/synchronization'
module Concurrent
# This file has circular require issues. It must be autoloaded here.
autoload :Options, 'concurrent/options'
# Lazy evaluation of a block yielding an immutable result. Useful for
# expensive operations that may never be needed. It may be non-blocking,
# supports the `Concern::Obligation` interface, and accepts the injection of
# a custom executor upon which to execute the block. Processing of the
# block will be deferred until the first time `#value` is called.
# At that time the caller can choose to return immediately and let
# the block execute asynchronously, block indefinitely, or block
# with a timeout.
#
# When a `Delay` is created its state is set to `pending`. The value and
# reason are both `nil`. The first time the `#value` method is called the
# enclosed operation will be run and the calling thread will block. Other
# threads attempting to call `#value` will block as well. Once the operation
# is complete the *value* will be set to the result of the operation or the
# *reason* will be set to the raised exception, as appropriate. All threads
# blocked on `#value` will return. Subsequent calls to `#value` will immediately
# return the cached value. The operation will only be run once. This means that
# any side effects created by the operation will only happen once as well.
#
# `Delay` includes the `Concurrent::Concern::Dereferenceable` mixin to support thread
# safety of the reference returned by `#value`.
#
# @!macro copy_options
#
# @!macro delay_note_regarding_blocking
# @note The default behavior of `Delay` is to block indefinitely when
# calling either `value` or `wait`, executing the delayed operation on
# the current thread. This makes the `timeout` value completely
# irrelevant. To enable non-blocking behavior, use the `executor`
# constructor option. This will cause the delayed operation to be
# executed on the given executor, allowing the call to time out.
#
# @see Concurrent::Concern::Dereferenceable
class Delay < Synchronization::LockableObject
include Concern::Obligation
# NOTE: Because the global thread pools are lazy-loaded with these objects
# there is a performance hit every time we post a new task to one of these
# thread pools. Subsequently it is critical that `Delay` perform as fast
# as possible post-completion. This class has been highly optimized using
# the benchmark script `examples/lazy_and_delay.rb`. Do NOT attempt to
# DRY-up this class or perform other refactoring without running the
# benchmarks and ensuring that performance is not negatively impacted.
# Create a new `Delay` in the `:pending` state.
#
# @!macro executor_and_deref_options
#
# @yield the delayed operation to perform
#
# @raise [ArgumentError] if no block is given
def initialize(opts = {}, &block)
raise ArgumentError.new('no block given') unless block_given?
super(&nil)
synchronize { ns_initialize(opts, &block) }
end
# Return the value this object represents after applying the options
# specified by the `#set_deref_options` method. If the delayed operation
# raised an exception this method will return nil. The exception object
# can be accessed via the `#reason` method.
#
# @param [Numeric] timeout the maximum number of seconds to wait
# @return [Object] the current value of the object
#
# @!macro delay_note_regarding_blocking
def value(timeout = nil)
if @executor # TODO (pitr 12-Sep-2015): broken unsafe read?
super
else
# this function has been optimized for performance and
# should not be modified without running new benchmarks
synchronize do
execute = @evaluation_started = true unless @evaluation_started
if execute
begin
set_state(true, @task.call, nil)
rescue => ex
set_state(false, nil, ex)
end
elsif incomplete?
raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay'
end
end
if @do_nothing_on_deref
@value
else
apply_deref_options(@value)
end
end
end
# Return the value this object represents after applying the options
# specified by the `#set_deref_options` method. If the delayed operation
# raised an exception, this method will raise that exception (even when
# the operation has already been executed).
#
# @param [Numeric] timeout the maximum number of seconds to wait
# @return [Object] the current value of the object
# @raise [Exception] when `#rejected?` raises `#reason`
#
# @!macro delay_note_regarding_blocking
def value!(timeout = nil)
if @executor
super
else
result = value
raise @reason if @reason
result
end
end
# Return the value this object represents after applying the options
# specified by the `#set_deref_options` method.
#
# @param [Integer] timeout (nil) the maximum number of seconds to wait for
# the value to be computed. When `nil` the caller will block indefinitely.
#
# @return [Object] self
#
# @!macro delay_note_regarding_blocking
def wait(timeout = nil)
if @executor
execute_task_once
super(timeout)
else
value
end
self
end
# Reconfigures the block returning the value if still `#incomplete?`
#
# @yield the delayed operation to perform
# @return [true, false] if success
def reconfigure(&block)
synchronize do
raise ArgumentError.new('no block given') unless block_given?
unless @evaluation_started
@task = block
true
else
false
end
end
end
protected
def ns_initialize(opts, &block)
init_obligation
set_deref_options(opts)
@executor = opts[:executor]
@task = block
@state = :pending
@evaluation_started = false
end
private
# @!visibility private
def execute_task_once # :nodoc:
# this function has been optimized for performance and
# should not be modified without running new benchmarks
execute = task = nil
synchronize do
execute = @evaluation_started = true unless @evaluation_started
task = @task
end
if execute
executor = Options.executor_from_options(executor: @executor)
executor.post do
begin
result = task.call
success = true
rescue => ex
reason = ex
end
synchronize do
set_state(success, result, reason)
event.set
end
end
end
end
end
end
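# A minimal sketch of the default (blocking) behavior described above: with
# no :executor the block runs lazily, once, on the first caller's thread.
require 'concurrent'

delay = Concurrent::Delay.new { 6 * 7 }
delay.pending? #=> true; the block has not run yet
delay.value    #=> 42, computed on this thread
delay.value    #=> 42, cached; the block ran exactly once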

View File

@ -0,0 +1,69 @@
module Concurrent
Error = Class.new(StandardError)
# Raised when errors occur during configuration.
ConfigurationError = Class.new(Error)
# Raised when an asynchronous operation is cancelled before execution.
CancelledOperationError = Class.new(Error)
# Raised when a lifecycle method (such as `stop`) is called in an improper
# sequence or when the object is in an inappropriate state.
LifecycleError = Class.new(Error)
# Raised when an attempt is made to violate an immutability guarantee.
ImmutabilityError = Class.new(Error)
# Raised when an operation is attempted which is not legal given the
# receiver's current state
IllegalOperationError = Class.new(Error)
# Raised when an object's methods are called when it has not been
# properly initialized.
InitializationError = Class.new(Error)
# Raised when an object with a start/stop lifecycle has been started an
# excessive number of times. Often used in conjunction with a restart
# policy or strategy.
MaxRestartFrequencyError = Class.new(Error)
# Raised when an attempt is made to modify an immutable object
# (such as an `IVar`) after its final state has been set.
class MultipleAssignmentError < Error
attr_reader :inspection_data
def initialize(message = nil, inspection_data = nil)
@inspection_data = inspection_data
super message
end
def inspect
format '%s %s>', super[0..-2], @inspection_data.inspect
end
end
# Raised by an `Executor` when it is unable to process a given task,
# possibly because of a reject policy or other internal error.
RejectedExecutionError = Class.new(Error)
# Raised when any finite resource, such as a lock counter, exceeds its
# maximum limit/threshold.
ResourceLimitError = Class.new(Error)
# Raised when an operation times out.
TimeoutError = Class.new(Error)
# Aggregates multiple exceptions.
class MultipleErrors < Error
attr_reader :errors
def initialize(errors, message = "#{errors.size} errors")
@errors = errors
super [*message,
*errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1)
].join("\n")
end
end
end

View File

@ -0,0 +1,352 @@
require 'concurrent/constants'
require 'concurrent/errors'
require 'concurrent/maybe'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/atomic/count_down_latch'
require 'concurrent/utility/engine'
require 'concurrent/utility/monotonic_time'
module Concurrent
# @!macro exchanger
#
# A synchronization point at which threads can pair and swap elements within
# pairs. Each thread presents some object on entry to the exchange method,
# matches with a partner thread, and receives its partner's object on return.
#
# @!macro thread_safe_variable_comparison
#
# This implementation is very simple, using only a single slot for each
# exchanger (unlike more advanced implementations which use an "arena").
# This approach will work perfectly fine when there are only a few threads
# accessing a single `Exchanger`. Beyond a handful of threads the performance
# will degrade rapidly due to contention on the single slot, but the algorithm
# will remain correct.
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger
# @example
#
# exchanger = Concurrent::Exchanger.new
#
# threads = [
# Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar"
# Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo"
# ]
# threads.each {|t| t.join(2) }
# @!visibility private
class AbstractExchanger < Synchronization::Object
# @!visibility private
CANCEL = ::Object.new
private_constant :CANCEL
def initialize
super
end
# @!macro exchanger_method_do_exchange
#
# Waits for another thread to arrive at this exchange point (unless the
# current thread is interrupted), and then transfers the given object to
# it, receiving its object in return. The timeout value indicates the
# approximate number of seconds the method should block while waiting
# for the exchange. When the timeout value is `nil` the method will
# block indefinitely.
#
# @param [Object] value the value to exchange with another thread
# @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely
#
# @!macro exchanger_method_exchange
#
# In some edge cases when a `timeout` is given a return value of `nil` may be
# ambiguous. Specifically, if `nil` is a valid value in the exchange it will
# be impossible to tell whether `nil` is the actual return value or if it
# signifies timeout. When `nil` is a valid value in the exchange consider
# using {#exchange!} or {#try_exchange} instead.
#
# @return [Object] the value exchanged by the other thread or `nil` on timeout
def exchange(value, timeout = nil)
(value = do_exchange(value, timeout)) == CANCEL ? nil : value
end
# @!macro exchanger_method_do_exchange
# @!macro exchanger_method_exchange_bang
#
# On timeout a {Concurrent::TimeoutError} exception will be raised.
#
# @return [Object] the value exchanged by the other thread
# @raise [Concurrent::TimeoutError] on timeout
def exchange!(value, timeout = nil)
if (value = do_exchange(value, timeout)) == CANCEL
raise Concurrent::TimeoutError
else
value
end
end
# @!macro exchanger_method_do_exchange
# @!macro exchanger_method_try_exchange
#
# The return value will be a {Concurrent::Maybe} set to `Just` on success or
# `Nothing` on timeout.
#
# @return [Concurrent::Maybe] on success a `Just` maybe will be returned with
# the item exchanged by the other thread as `#value`; on timeout a
# `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason`
#
# @example
#
# exchanger = Concurrent::Exchanger.new
#
# result = exchanger.exchange(:foo, 0.5)
#
# if result.just?
# puts result.value #=> :bar
# else
# puts 'timeout'
# end
def try_exchange(value, timeout = nil)
if (value = do_exchange(value, timeout)) == CANCEL
Concurrent::Maybe.nothing(Concurrent::TimeoutError)
else
Concurrent::Maybe.just(value)
end
end
private
# @!macro exchanger_method_do_exchange
#
# @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout
def do_exchange(value, timeout)
raise NotImplementedError
end
end
# @!macro internal_implementation_note
# @!visibility private
class RubyExchanger < AbstractExchanger
# A simplified version of java.util.concurrent.Exchanger written by
# Doug Lea, Bill Scherer, and Michael Scott with assistance from members
# of JCP JSR-166 Expert Group and released to the public domain. It does
# not include the arena or the multi-processor spin loops.
# http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java
safe_initialization!
class Node < Concurrent::Synchronization::Object
attr_atomic :value
safe_initialization!
def initialize(item)
super()
@Item = item
@Latch = Concurrent::CountDownLatch.new
self.value = nil
end
def latch
@Latch
end
def item
@Item
end
end
private_constant :Node
def initialize
super
end
private
attr_atomic(:slot)
# @!macro exchanger_method_do_exchange
#
# @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout
def do_exchange(value, timeout)
# ALGORITHM
#
# From the original Java version:
#
# > The basic idea is to maintain a "slot", which is a reference to
# > a Node containing both an Item to offer and a "hole" waiting to
# > get filled in. If an incoming "occupying" thread sees that the
# > slot is null, it CAS'es (compareAndSets) a Node there and waits
# > for another to invoke exchange. That second "fulfilling" thread
# > sees that the slot is non-null, and so CASes it back to null,
# > also exchanging items by CASing the hole, plus waking up the
# > occupying thread if it is blocked. In each case CAS'es may
# > fail because a slot at first appears non-null but is null upon
# > CAS, or vice-versa. So threads may need to retry these
# > actions.
#
# This version:
#
# An exchange occurs between an "occupier" thread and a "fulfiller" thread.
# The "slot" is used to setup this interaction. The first thread in the
# exchange puts itself into the slot (occupies) and waits for a fulfiller.
# The second thread removes the occupier from the slot and attempts to
# perform the exchange. Removing the occupier also frees the slot for
# another occupier/fulfiller pair.
#
# Because the occupier and the fulfiller are operating independently and
# because there may be contention with other threads, any failed operation
# indicates contention. Both the occupier and the fulfiller operate within
# spin loops. Any failed actions along the happy path will cause the thread
# to repeat the loop and try again.
#
# When a timeout value is given the thread must be cognizant of time spent
# in the spin loop. The remaining time is checked every loop. When the time
# runs out the thread will exit.
#
# A "node" is the data structure used to perform the exchange. Only the
# occupier's node is necessary. It's the node used for the exchange.
# Each node has an "item," a "hole" (self), and a "latch." The item is the
# node's initial value. It never changes. It's what the fulfiller returns on
# success. The occupier's hole is where the fulfiller put its item. It's the
# item that the occupier returns on success. The latch is used for synchronization.
# Because a thread may act as either an occupier or fulfiller (or possibly
# both in periods of high contention) every thread creates a node when
# the exchange method is first called.
#
# The following steps occur within the spin loop. If any actions fail
# the thread will loop and try again, so long as there is time remaining.
# If time runs out the thread will return CANCEL.
#
# Check the slot for an occupier:
#
# * If the slot is empty try to occupy
# * If the slot is full try to fulfill
#
# Attempt to occupy:
#
# * Attempt to CAS myself into the slot
# * Go to sleep and wait to be woken by a fulfiller
# * If the sleep is successful then the fulfiller completed its happy path
# - Return the value from my hole (the value given by the fulfiller)
# * When the sleep fails (time ran out) attempt to cancel the operation
# - Attempt to CAS myself out of the hole
# - If successful there is no contention
# - Return CANCEL
# - On failure, I am competing with a fulfiller
# - Attempt to CAS my hole to CANCEL
# - On success
# - Let the fulfiller deal with my cancel
# - Return CANCEL
# - On failure the fulfiller has completed its happy path
# - Return the value from my hole (the fulfiller's value)
#
# Attempt to fulfill:
#
# * Attempt to CAS the occupier out of the slot
# - On failure loop again
# * Attempt to CAS my item into the occupier's hole
# - On failure the occupier is trying to cancel
# - Loop again
# - On success we are on the happy path
# - Wake the sleeping occupier
# - Return the occupier's item
value = NULL if value.nil? # The sentinel allows nil to be a valid value
me = Node.new(value) # create my node in case I need to occupy
end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up
result = loop do
other = slot
if other && compare_and_set_slot(other, nil)
# try to fulfill
if other.compare_and_set_value(nil, value)
# happy path
other.latch.count_down
break other.item
end
elsif other.nil? && compare_and_set_slot(nil, me)
# try to occupy
timeout = end_at - Concurrent.monotonic_time if timeout
if me.latch.wait(timeout)
# happy path
break me.value
else
# attempt to remove myself from the slot
if compare_and_set_slot(me, nil)
break CANCEL
elsif !me.compare_and_set_value(nil, CANCEL)
# I've failed to block the fulfiller
break me.value
end
end
end
break CANCEL if timeout && Concurrent.monotonic_time >= end_at
end
result == NULL ? nil : result
end
end
if Concurrent.on_jruby?
# @!macro internal_implementation_note
# @!visibility private
class JavaExchanger < AbstractExchanger
def initialize
@exchanger = java.util.concurrent.Exchanger.new
end
private
# @!macro exchanger_method_do_exchange
#
# @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout
def do_exchange(value, timeout)
result = nil
if timeout.nil?
Synchronization::JRuby.sleep_interruptibly do
result = @exchanger.exchange(value)
end
else
Synchronization::JRuby.sleep_interruptibly do
result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
end
end
result
rescue java.util.concurrent.TimeoutException
CANCEL
end
end
end
# @!visibility private
# @!macro internal_implementation_note
ExchangerImplementation = case
when Concurrent.on_jruby?
JavaExchanger
else
RubyExchanger
end
private_constant :ExchangerImplementation
# @!macro exchanger
class Exchanger < ExchangerImplementation
# @!method initialize
# Creates exchanger instance
# @!method exchange(value, timeout = nil)
# @!macro exchanger_method_do_exchange
# @!macro exchanger_method_exchange
# @!method exchange!(value, timeout = nil)
# @!macro exchanger_method_do_exchange
# @!macro exchanger_method_exchange_bang
# @!method try_exchange(value, timeout = nil)
# @!macro exchanger_method_do_exchange
# @!macro exchanger_method_try_exchange
end
end

View File

@ -0,0 +1,134 @@
require 'concurrent/errors'
require 'concurrent/executor/executor_service'
require 'concurrent/synchronization'
require 'concurrent/utility/at_exit'
module Concurrent
# @!macro abstract_executor_service_public_api
# @!visibility private
class AbstractExecutorService < Synchronization::LockableObject
include ExecutorService
# The set of possible fallback policies that may be set at thread pool creation.
FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze
# @!macro executor_service_attr_reader_fallback_policy
attr_reader :fallback_policy
# Create a new thread pool.
def initialize(*args, &block)
super(&nil)
synchronize { ns_initialize(*args, &block) }
end
# @!macro executor_service_method_shutdown
def shutdown
raise NotImplementedError
end
# @!macro executor_service_method_kill
def kill
raise NotImplementedError
end
# @!macro executor_service_method_wait_for_termination
def wait_for_termination(timeout = nil)
raise NotImplementedError
end
# @!macro executor_service_method_running_question
def running?
synchronize { ns_running? }
end
# @!macro executor_service_method_shuttingdown_question
def shuttingdown?
synchronize { ns_shuttingdown? }
end
# @!macro executor_service_method_shutdown_question
def shutdown?
synchronize { ns_shutdown? }
end
# @!macro executor_service_method_auto_terminate_question
def auto_terminate?
synchronize { ns_auto_terminate? }
end
# @!macro executor_service_method_auto_terminate_setter
def auto_terminate=(value)
synchronize { self.ns_auto_terminate = value }
end
private
# Handler which executes the `fallback_policy` once the queue size
# reaches `max_queue`.
#
# @param [Array] args the arguments to the task which is being handled.
#
# @!visibility private
def handle_fallback(*args)
case fallback_policy
when :abort
raise RejectedExecutionError
when :discard
false
when :caller_runs
begin
yield(*args)
rescue => ex
# let it fail
log DEBUG, ex
end
true
else
fail "Unknown fallback policy #{fallback_policy}"
end
end
def ns_execute(*args, &task)
raise NotImplementedError
end
# @!macro executor_service_method_ns_shutdown_execution
#
# Callback method called when an orderly shutdown has completed.
# The default behavior is to signal all waiting threads.
def ns_shutdown_execution
# do nothing
end
# @!macro executor_service_method_ns_kill_execution
#
# Callback method called when the executor has been killed.
# The default behavior is to do nothing.
def ns_kill_execution
# do nothing
end
def ns_auto_terminate?
!!@auto_terminate
end
def ns_auto_terminate=(value)
case value
when true
AtExit.add(self) { terminate_at_exit }
@auto_terminate = true
when false
AtExit.delete(self)
@auto_terminate = false
else
raise ArgumentError
end
end
def terminate_at_exit
kill # TODO be gentle first
wait_for_termination(10)
end
end
end
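# A hedged sketch of the fallback policies handled above, using a bounded
# pool (one worker, queue of one) to force an overflow.
require 'concurrent'

pool = Concurrent::ThreadPoolExecutor.new(
  min_threads: 1, max_threads: 1, max_queue: 1,
  fallback_policy: :caller_runs
)
pool.post { sleep 0.1 } # occupies the single worker
pool.post { :queued }   # fills the queue
pool.post { :overflow } # :caller_runs executes this on the calling thread
# With :abort the overflowing post raises Concurrent::RejectedExecutionError;
# with :discard it returns false and the task is silently dropped.
pool.shutdown
pool.wait_for_termination(1)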

View File

@ -0,0 +1,62 @@
require 'concurrent/utility/engine'
require 'concurrent/executor/thread_pool_executor'
module Concurrent
# A thread pool that dynamically grows and shrinks to fit the current workload.
# New threads are created as needed, existing threads are reused, and threads
# that remain idle for too long are killed and removed from the pool. These
# pools are particularly suited to applications that perform a high volume of
# short-lived tasks.
#
# On creation a `CachedThreadPool` has zero running threads. New threads are
# created on the pool as new operations are `#post`ed. The size of the pool
# will grow until `#max_length` threads are in the pool or until the number
# of threads exceeds the number of running and pending operations. When a new
# operation is posted to the pool the first available idle thread will be tasked
# with the new operation.
#
# Should a thread crash for any reason the thread will immediately be removed
# from the pool. Similarly, threads which remain idle for an extended period
# of time will be killed and reclaimed. Thus these thread pools are very
# efficient at reclaiming unused resources.
#
# The API and behavior of this class are based on Java's `CachedThreadPool`
#
# @!macro thread_pool_options
class CachedThreadPool < ThreadPoolExecutor
# @!macro cached_thread_pool_method_initialize
#
# Create a new thread pool.
#
# @param [Hash] opts the options defining pool behavior.
# @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
#
# @raise [ArgumentError] if `fallback_policy` is not a known policy
#
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool--
def initialize(opts = {})
defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT }
overrides = { min_threads: 0,
max_threads: DEFAULT_MAX_POOL_SIZE,
max_queue: DEFAULT_MAX_QUEUE_SIZE }
super(defaults.merge(opts).merge(overrides))
end
private
# @!macro cached_thread_pool_method_initialize
# @!visibility private
def ns_initialize(opts)
super(opts)
if Concurrent.on_jruby?
@max_queue = 0
@executor = java.util.concurrent.Executors.newCachedThreadPool
@executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
@executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
self.auto_terminate = opts.fetch(:auto_terminate, true)
end
end
end
end
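# A minimal usage sketch: the pool starts with zero threads and grows on
# demand as tasks are posted.
require 'concurrent'

pool = Concurrent::CachedThreadPool.new
10.times { |i| pool.post { Math.sqrt(i) } }
pool.length #=> the number of worker threads spawned so far
pool.shutdown
pool.wait_for_termination(5) #=> true once all queued tasks have finished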

View File

@ -0,0 +1,185 @@
require 'concurrent/concern/logging'
module Concurrent
###################################################################
# @!macro executor_service_method_post
#
# Submit a task to the executor for asynchronous processing.
#
# @param [Array] args zero or more arguments to be passed to the task
#
# @yield the asynchronous task to perform
#
# @return [Boolean] `true` if the task is queued, `false` if the executor
# is not running
#
# @raise [ArgumentError] if no task is given
# @!macro executor_service_method_left_shift
#
# Submit a task to the executor for asynchronous processing.
#
# @param [Proc] task the asynchronous task to perform
#
# @return [self] returns itself
# @!macro executor_service_method_can_overflow_question
#
# Does the task queue have a maximum size?
#
# @return [Boolean] True if the task queue has a maximum size else false.
# @!macro executor_service_method_serialized_question
#
# Does this executor guarantee serialization of its operations?
#
# @return [Boolean] True if the executor guarantees that all operations
# will be processed in the order they are received and no two operations may
# occur simultaneously. Else false.
###################################################################
# @!macro executor_service_public_api
#
# @!method post(*args, &task)
# @!macro executor_service_method_post
#
# @!method <<(task)
# @!macro executor_service_method_left_shift
#
# @!method can_overflow?
# @!macro executor_service_method_can_overflow_question
#
# @!method serialized?
# @!macro executor_service_method_serialized_question
###################################################################
# @!macro executor_service_attr_reader_fallback_policy
# @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`.
# @!macro executor_service_method_shutdown
#
# Begin an orderly shutdown. Tasks already in the queue will be executed,
# but no new tasks will be accepted. Has no additional effect if the
# thread pool is not running.
# @!macro executor_service_method_kill
#
# Begin an immediate shutdown. In-progress tasks will be allowed to
# complete but enqueued tasks will be dismissed and no new tasks
# will be accepted. Has no additional effect if the thread pool is
# not running.
# @!macro executor_service_method_wait_for_termination
#
# Block until executor shutdown is complete or until `timeout` seconds have
# passed.
#
# @note Does not initiate shutdown or termination. Either `shutdown` or `kill`
# must be called before this method (or on another thread).
#
# @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete
#
# @return [Boolean] `true` if shutdown complete or false on `timeout`
# @!macro executor_service_method_running_question
#
# Is the executor running?
#
# @return [Boolean] `true` when running, `false` when shutting down or shutdown
# @!macro executor_service_method_shuttingdown_question
#
# Is the executor shutting down?
#
# @return [Boolean] `true` when not running and not shutdown, else `false`
# @!macro executor_service_method_shutdown_question
#
# Is the executor shutdown?
#
# @return [Boolean] `true` when shutdown, `false` when shutting down or running
# @!macro executor_service_method_auto_terminate_question
#
# Does the executor auto-terminate when the application exits?
#
# @return [Boolean] `true` when auto-termination is enabled else `false`.
# @!macro executor_service_method_auto_terminate_setter
#
# Set the auto-terminate behavior for this executor.
#
# @param [Boolean] value The new auto-terminate value to set for this executor.
#
# @return [Boolean] `true` when auto-termination is enabled else `false`.
###################################################################
# @!macro abstract_executor_service_public_api
#
# @!macro executor_service_public_api
#
# @!attribute [r] fallback_policy
# @!macro executor_service_attr_reader_fallback_policy
#
# @!method shutdown
# @!macro executor_service_method_shutdown
#
# @!method kill
# @!macro executor_service_method_kill
#
# @!method wait_for_termination(timeout = nil)
# @!macro executor_service_method_wait_for_termination
#
# @!method running?
# @!macro executor_service_method_running_question
#
# @!method shuttingdown?
# @!macro executor_service_method_shuttingdown_question
#
# @!method shutdown?
# @!macro executor_service_method_shutdown_question
#
# @!method auto_terminate?
# @!macro executor_service_method_auto_terminate_question
#
# @!method auto_terminate=(value)
# @!macro executor_service_method_auto_terminate_setter
###################################################################
# @!macro executor_service_public_api
# @!visibility private
module ExecutorService
include Concern::Logging
# @!macro executor_service_method_post
def post(*args, &task)
raise NotImplementedError
end
# @!macro executor_service_method_left_shift
def <<(task)
post(&task)
self
end
# @!macro executor_service_method_can_overflow_question
#
# @note Always returns `false`
def can_overflow?
false
end
# @!macro executor_service_method_serialized_question
#
# @note Always returns `false`
def serialized?
false
end
end
end
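# A minimal sketch of the two submission styles defined above, using the
# gem's ImmediateExecutor (which runs tasks on the caller's thread).
require 'concurrent'

executor = Concurrent::ImmediateExecutor.new

executor.post(1, 2) { |a, b| a + b } #=> true; the task was accepted and run
executor << proc { :done }           # << takes a prebuilt callable, returns self
executor.can_overflow?               #=> false; this executor has no queue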

View File

@ -0,0 +1,206 @@
require 'concurrent/utility/engine'
require 'concurrent/executor/thread_pool_executor'
module Concurrent
# @!macro thread_pool_executor_constant_default_max_pool_size
# Default maximum number of threads that will be created in the pool.
# @!macro thread_pool_executor_constant_default_min_pool_size
# Default minimum number of threads that will be retained in the pool.
# @!macro thread_pool_executor_constant_default_max_queue_size
# Default maximum number of tasks that may be added to the task queue.
# @!macro thread_pool_executor_constant_default_thread_timeout
# Default maximum number of seconds a thread in the pool may remain idle
# before being reclaimed.
# @!macro thread_pool_executor_attr_reader_max_length
# The maximum number of threads that may be created in the pool.
# @return [Integer] The maximum number of threads that may be created in the pool.
# @!macro thread_pool_executor_attr_reader_min_length
# The minimum number of threads that may be retained in the pool.
# @return [Integer] The minimum number of threads that may be retained in the pool.
# @!macro thread_pool_executor_attr_reader_largest_length
# The largest number of threads that have been created in the pool since construction.
# @return [Integer] The largest number of threads that have been created in the pool since construction.
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
# The number of tasks that have been scheduled for execution on the pool since construction.
# @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction.
# @!macro thread_pool_executor_attr_reader_completed_task_count
# The number of tasks that have been completed by the pool since construction.
# @return [Integer] The number of tasks that have been completed by the pool since construction.
# @!macro thread_pool_executor_attr_reader_idletime
# The number of seconds that a thread may be idle before being reclaimed.
# @return [Integer] The number of seconds that a thread may be idle before being reclaimed.
# @!macro thread_pool_executor_attr_reader_max_queue
# The maximum number of tasks that may be waiting in the work queue at any one time.
# When the queue size reaches `max_queue` subsequent tasks will be rejected in
# accordance with the configured `fallback_policy`.
#
# @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time.
# When the queue size reaches `max_queue` subsequent tasks will be rejected in
# accordance with the configured `fallback_policy`.
# @!macro thread_pool_executor_attr_reader_length
# The number of threads currently in the pool.
# @return [Integer] The number of threads currently in the pool.
# @!macro thread_pool_executor_attr_reader_queue_length
# The number of tasks in the queue awaiting execution.
# @return [Integer] The number of tasks in the queue awaiting execution.
# @!macro thread_pool_executor_attr_reader_remaining_capacity
# Number of tasks that may be enqueued before reaching `max_queue` and rejecting
# new tasks. A value of -1 indicates that the queue may grow without bound.
#
# @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting
# new tasks. A value of -1 indicates that the queue may grow without bound.
# @!macro thread_pool_executor_public_api
#
# @!macro abstract_executor_service_public_api
#
# @!attribute [r] max_length
# @!macro thread_pool_executor_attr_reader_max_length
#
# @!attribute [r] min_length
# @!macro thread_pool_executor_attr_reader_min_length
#
# @!attribute [r] largest_length
# @!macro thread_pool_executor_attr_reader_largest_length
#
# @!attribute [r] scheduled_task_count
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
#
# @!attribute [r] completed_task_count
# @!macro thread_pool_executor_attr_reader_completed_task_count
#
# @!attribute [r] idletime
# @!macro thread_pool_executor_attr_reader_idletime
#
# @!attribute [r] max_queue
# @!macro thread_pool_executor_attr_reader_max_queue
#
# @!attribute [r] length
# @!macro thread_pool_executor_attr_reader_length
#
# @!attribute [r] queue_length
# @!macro thread_pool_executor_attr_reader_queue_length
#
# @!attribute [r] remaining_capacity
# @!macro thread_pool_executor_attr_reader_remaining_capacity
#
# @!method can_overflow?
# @!macro executor_service_method_can_overflow_question
# @!macro thread_pool_options
#
# **Thread Pool Options**
#
# Thread pools support several configuration options:
#
# * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
# * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
# any one time. When the queue size reaches `max_queue` and no new threads can be created,
# subsequent tasks will be rejected in accordance with the configured `fallback_policy`.
# * `auto_terminate`: When true (default) an `at_exit` handler will be registered which
# will stop the thread pool when the application exits. See below for more information
# on shutting down thread pools.
# * `fallback_policy`: The policy defining how rejected tasks are handled.
#
# Three fallback policies are supported:
#
# * `:abort`: Raise a `RejectedExecutionError` exception and discard the task.
# * `:discard`: Discard the task and return false.
# * `:caller_runs`: Execute the task on the calling thread.
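#
# A minimal sketch of how these options might combine (the values are
# illustrative, not recommended defaults):
#
# ```ruby
# pool = Concurrent::ThreadPoolExecutor.new(
#   min_threads:     2,
#   max_threads:     10,
#   max_queue:       100,
#   idletime:        120,
#   fallback_policy: :caller_runs
# )
# ```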
#
# **Shutting Down Thread Pools**
#
# Killing a thread pool while tasks are still being processed, either by calling
# the `#kill` method or at application exit, will have unpredictable results. There
# is no way for the thread pool to know what resources are being used by the
# in-progress tasks. When those tasks are killed the impact on those resources
# cannot be predicted. The *best* practice is to explicitly shutdown all thread
# pools using the provided methods:
#
# * Call `#shutdown` to initiate an orderly termination of all in-progress tasks
# * Call `#wait_for_termination` with an appropriate timeout interval and allow
# the orderly shutdown to complete
# * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time
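#
# A minimal sketch of that sequence (the 10-second timeout is illustrative):
#
# ```ruby
# pool.shutdown
# pool.kill unless pool.wait_for_termination(10)
# ```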
#
# On some runtime platforms (most notably the JVM) the application will not
# exit until all thread pools have been shutdown. To prevent applications from
# "hanging" on exit all thread pools include an `at_exit` handler that will
# stop the thread pool when the application exits. This handler uses a brute
# force method to stop the pool and makes no guarantees regarding resources being
# used by any tasks still running. Registration of this `at_exit` handler can be
# prevented by setting the thread pool's constructor `:auto_terminate` option to
# `false` when the thread pool is created. All thread pools support this option.
#
# ```ruby
# pool1 = Concurrent::FixedThreadPool.new(5) # an `at_exit` handler will be registered
# pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # prevent `at_exit` handler registration
# ```
#
# @note Failure to properly shutdown a thread pool can lead to unpredictable results.
# Please read *Shutting Down Thread Pools* for more information.
#
# @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface
# @see http://ruby-doc.org//core-2.2.0/Kernel.html#method-i-at_exit Kernel#at_exit
# @!macro fixed_thread_pool
#
# A thread pool that reuses a fixed number of threads operating off an unbounded queue.
# At any point, at most `num_threads` will be active processing tasks. When all threads are busy new
# tasks `#post` to the thread pool are enqueued until a thread becomes available.
# Should a thread crash for any reason the thread will immediately be removed
# from the pool and replaced.
#
# The API and behavior of this class are based on Java's `FixedThreadPool`
#
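# An illustrative usage sketch:
#
# ```ruby
# pool = Concurrent::FixedThreadPool.new(5)
# 10.times { |i| pool.post { puts "task #{i}" } }
# pool.shutdown
# pool.wait_for_termination
# ```
#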
# @!macro thread_pool_options
class FixedThreadPool < ThreadPoolExecutor
# @!macro fixed_thread_pool_method_initialize
#
# Create a new thread pool.
#
# @param [Integer] num_threads the number of threads to allocate
# @param [Hash] opts the options defining pool behavior.
# @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy
#
# @raise [ArgumentError] if `num_threads` is less than or equal to zero
# @raise [ArgumentError] if `fallback_policy` is not a known policy
#
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int-
def initialize(num_threads, opts = {})
raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1
defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE,
idletime: DEFAULT_THREAD_IDLETIMEOUT }
overrides = { min_threads: num_threads,
max_threads: num_threads }
super(defaults.merge(opts).merge(overrides))
end
end
end

View File

@ -0,0 +1,66 @@
require 'concurrent/atomic/event'
require 'concurrent/executor/abstract_executor_service'
require 'concurrent/executor/serial_executor_service'
module Concurrent
# An executor service which runs all operations on the current thread,
# blocking as necessary. Operations are performed in the order they are
# received and no two operations can be performed simultaneously.
#
# This executor service exists mainly for testing and debugging. When used
# it immediately runs every `#post` operation on the current thread, blocking
# that thread until the operation is complete. This can be very beneficial
# during testing because it makes all operations deterministic.
#
# @note Intended for use primarily in testing and debugging.
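#
# An illustrative sketch:
#
# ```ruby
# executor = Concurrent::ImmediateExecutor.new
# executor.post { puts 'runs immediately, on the calling thread' }
# executor.shutdown
# ```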
class ImmediateExecutor < AbstractExecutorService
include SerialExecutorService
# Creates a new executor
def initialize
@stopped = Concurrent::Event.new
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return false unless running?
task.call(*args)
true
end
# @!macro executor_service_method_left_shift
def <<(task)
post(&task)
self
end
# @!macro executor_service_method_running_question
def running?
! shutdown?
end
# @!macro executor_service_method_shuttingdown_question
def shuttingdown?
false
end
# @!macro executor_service_method_shutdown_question
def shutdown?
@stopped.set?
end
# @!macro executor_service_method_shutdown
def shutdown
@stopped.set
true
end
alias_method :kill, :shutdown
# @!macro executor_service_method_wait_for_termination
def wait_for_termination(timeout = nil)
@stopped.wait(timeout)
end
end
end

View File

@ -0,0 +1,44 @@
require 'concurrent/executor/immediate_executor'
require 'concurrent/executor/simple_executor_service'
module Concurrent
# An executor service which runs all operations on a new thread, blocking
# until it completes. Operations are performed in the order they are received
# and no two operations can be performed simultaneously.
#
# This executor service exists mainly for testing and debugging. When used it
# immediately runs every `#post` operation on a new thread, blocking the
# current thread until the operation is complete. This is similar to how the
# ImmediateExecutor works, but the operation has the full stack of the new
# thread at its disposal. This can be helpful when the operations will spawn
# more operations on the same executor and so on - such a situation might
# overflow the single stack in case of an ImmediateExecutor, which is
# inconsistent with how it would behave for a threaded executor.
#
# @note Intended for use primarily in testing and debugging.
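#
# An illustrative sketch; like ImmediateExecutor, but the task gets its own
# thread (and stack) while the caller blocks:
#
# ```ruby
# executor = Concurrent::IndirectImmediateExecutor.new
# executor.post { puts "running on #{Thread.current}" }
# ```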
class IndirectImmediateExecutor < ImmediateExecutor
# Creates a new executor
def initialize
super
@internal_executor = SimpleExecutorService.new
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new("no block given") unless block_given?
return false unless running?
event = Concurrent::Event.new
@internal_executor.post do
begin
task.call(*args)
ensure
event.set
end
end
event.wait
true
end
end
end

View File

@ -0,0 +1,100 @@
if Concurrent.on_jruby?
require 'concurrent/errors'
require 'concurrent/utility/engine'
require 'concurrent/executor/abstract_executor_service'
module Concurrent
# @!macro abstract_executor_service_public_api
# @!visibility private
class JavaExecutorService < AbstractExecutorService
java_import 'java.lang.Runnable'
FALLBACK_POLICY_CLASSES = {
abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy,
discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy,
caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy
}.freeze
private_constant :FALLBACK_POLICY_CLASSES
def initialize(*args, &block)
super
ns_make_executor_runnable
end
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return handle_fallback(*args, &task) unless running?
@executor.submit_runnable Job.new(args, task)
true
rescue Java::JavaUtilConcurrent::RejectedExecutionException
raise RejectedExecutionError
end
def wait_for_termination(timeout = nil)
if timeout.nil?
ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok
true
else
@executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
end
end
def shutdown
synchronize do
self.ns_auto_terminate = false
@executor.shutdown
nil
end
end
def kill
synchronize do
self.ns_auto_terminate = false
@executor.shutdownNow
nil
end
end
private
def ns_running?
!(ns_shuttingdown? || ns_shutdown?)
end
def ns_shuttingdown?
if @executor.respond_to? :isTerminating
@executor.isTerminating
else
false
end
end
def ns_shutdown?
@executor.isShutdown || @executor.isTerminated
end
def ns_make_executor_runnable
if !defined?(@executor.submit_runnable)
@executor.class.class_eval do
java_alias :submit_runnable, :submit, [java.lang.Runnable.java_class]
end
end
end
class Job
include Runnable
def initialize(args, block)
@args = args
@block = block
end
def run
@block.call(*@args)
end
end
private_constant :Job
end
end
end

View File

@ -0,0 +1,29 @@
if Concurrent.on_jruby?
require 'concurrent/executor/java_executor_service'
require 'concurrent/executor/serial_executor_service'
module Concurrent
# @!macro single_thread_executor
# @!macro abstract_executor_service_public_api
# @!visibility private
class JavaSingleThreadExecutor < JavaExecutorService
include SerialExecutorService
# @!macro single_thread_executor_method_initialize
def initialize(opts = {})
super(opts)
end
private
def ns_initialize(opts)
@executor = java.util.concurrent.Executors.newSingleThreadExecutor
@fallback_policy = opts.fetch(:fallback_policy, :discard)
raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy)
self.auto_terminate = opts.fetch(:auto_terminate, true)
end
end
end
end

View File

@ -0,0 +1,123 @@
if Concurrent.on_jruby?
require 'concurrent/executor/java_executor_service'
module Concurrent
# @!macro thread_pool_executor
# @!macro thread_pool_options
# @!visibility private
class JavaThreadPoolExecutor < JavaExecutorService
# @!macro thread_pool_executor_constant_default_max_pool_size
DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647
# @!macro thread_pool_executor_constant_default_min_pool_size
DEFAULT_MIN_POOL_SIZE = 0
# @!macro thread_pool_executor_constant_default_max_queue_size
DEFAULT_MAX_QUEUE_SIZE = 0
# @!macro thread_pool_executor_constant_default_thread_timeout
DEFAULT_THREAD_IDLETIMEOUT = 60
# @!macro thread_pool_executor_attr_reader_max_length
attr_reader :max_length
# @!macro thread_pool_executor_attr_reader_max_queue
attr_reader :max_queue
# @!macro thread_pool_executor_method_initialize
def initialize(opts = {})
super(opts)
end
# @!macro executor_service_method_can_overflow_question
def can_overflow?
@max_queue != 0
end
# @!macro thread_pool_executor_attr_reader_min_length
def min_length
@executor.getCorePoolSize
end
# @!macro thread_pool_executor_attr_reader_max_length
def max_length
@executor.getMaximumPoolSize
end
# @!macro thread_pool_executor_attr_reader_length
def length
@executor.getPoolSize
end
# @!macro thread_pool_executor_attr_reader_largest_length
def largest_length
@executor.getLargestPoolSize
end
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
def scheduled_task_count
@executor.getTaskCount
end
# @!macro thread_pool_executor_attr_reader_completed_task_count
def completed_task_count
@executor.getCompletedTaskCount
end
# @!macro thread_pool_executor_attr_reader_idletime
def idletime
@executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS)
end
# @!macro thread_pool_executor_attr_reader_queue_length
def queue_length
@executor.getQueue.size
end
# @!macro thread_pool_executor_attr_reader_remaining_capacity
def remaining_capacity
@max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity
end
# @!macro executor_service_method_running_question
def running?
super && !@executor.isTerminating
end
private
def ns_initialize(opts)
min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
@max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
@fallback_policy = opts.fetch(:fallback_policy, :abort)
raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length
raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy)
if @max_queue == 0
queue = java.util.concurrent.LinkedBlockingQueue.new
else
queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue)
end
@executor = java.util.concurrent.ThreadPoolExecutor.new(
min_length,
max_length,
idletime,
java.util.concurrent.TimeUnit::SECONDS,
queue,
FALLBACK_POLICY_CLASSES[@fallback_policy].new)
self.auto_terminate = opts.fetch(:auto_terminate, true)
end
end
end
end

View File

@ -0,0 +1,78 @@
require 'concurrent/executor/abstract_executor_service'
require 'concurrent/atomic/event'
module Concurrent
# @!macro abstract_executor_service_public_api
# @!visibility private
class RubyExecutorService < AbstractExecutorService
safe_initialization!
def initialize(*args, &block)
super
@StopEvent = Event.new
@StoppedEvent = Event.new
end
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
synchronize do
# If the executor is shut down, reject this task
return handle_fallback(*args, &task) unless running?
ns_execute(*args, &task)
true
end
end
def shutdown
synchronize do
break unless running?
self.ns_auto_terminate = false
stop_event.set
ns_shutdown_execution
end
true
end
def kill
synchronize do
break if shutdown?
self.ns_auto_terminate = false
stop_event.set
ns_kill_execution
stopped_event.set
end
true
end
def wait_for_termination(timeout = nil)
stopped_event.wait(timeout)
end
private
def stop_event
@StopEvent
end
def stopped_event
@StoppedEvent
end
def ns_shutdown_execution
stopped_event.set
end
def ns_running?
!stop_event.set?
end
def ns_shuttingdown?
!(ns_running? || ns_shutdown?)
end
def ns_shutdown?
stopped_event.set?
end
end
end

View File

@ -0,0 +1,22 @@
require 'concurrent/executor/ruby_thread_pool_executor'
module Concurrent
# @!macro single_thread_executor
# @!macro abstract_executor_service_public_api
# @!visibility private
class RubySingleThreadExecutor < RubyThreadPoolExecutor
# @!macro single_thread_executor_method_initialize
def initialize(opts = {})
super(
min_threads: 1,
max_threads: 1,
max_queue: 0,
idletime: DEFAULT_THREAD_IDLETIMEOUT,
fallback_policy: opts.fetch(:fallback_policy, :discard),
auto_terminate: opts.fetch(:auto_terminate, true)
)
end
end
end

View File

@ -0,0 +1,362 @@
require 'thread'
require 'concurrent/atomic/event'
require 'concurrent/concern/logging'
require 'concurrent/executor/ruby_executor_service'
require 'concurrent/utility/monotonic_time'
module Concurrent
# @!macro thread_pool_executor
# @!macro thread_pool_options
# @!visibility private
class RubyThreadPoolExecutor < RubyExecutorService
# @!macro thread_pool_executor_constant_default_max_pool_size
DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE
# @!macro thread_pool_executor_constant_default_min_pool_size
DEFAULT_MIN_POOL_SIZE = 0
# @!macro thread_pool_executor_constant_default_max_queue_size
DEFAULT_MAX_QUEUE_SIZE = 0
# @!macro thread_pool_executor_constant_default_thread_timeout
DEFAULT_THREAD_IDLETIMEOUT = 60
# @!macro thread_pool_executor_attr_reader_max_length
attr_reader :max_length
# @!macro thread_pool_executor_attr_reader_min_length
attr_reader :min_length
# @!macro thread_pool_executor_attr_reader_idletime
attr_reader :idletime
# @!macro thread_pool_executor_attr_reader_max_queue
attr_reader :max_queue
# @!macro thread_pool_executor_method_initialize
def initialize(opts = {})
super(opts)
end
# @!macro thread_pool_executor_attr_reader_largest_length
def largest_length
synchronize { @largest_length }
end
# @!macro thread_pool_executor_attr_reader_scheduled_task_count
def scheduled_task_count
synchronize { @scheduled_task_count }
end
# @!macro thread_pool_executor_attr_reader_completed_task_count
def completed_task_count
synchronize { @completed_task_count }
end
# @!macro executor_service_method_can_overflow_question
def can_overflow?
synchronize { ns_limited_queue? }
end
# @!macro thread_pool_executor_attr_reader_length
def length
synchronize { @pool.length }
end
# @!macro thread_pool_executor_attr_reader_queue_length
def queue_length
synchronize { @queue.length }
end
# @!macro thread_pool_executor_attr_reader_remaining_capacity
def remaining_capacity
synchronize do
if ns_limited_queue?
@max_queue - @queue.length
else
-1
end
end
end
# @!visibility private
def remove_busy_worker(worker)
synchronize { ns_remove_busy_worker worker }
end
# @!visibility private
def ready_worker(worker)
synchronize { ns_ready_worker worker }
end
# @!visibility private
def worker_not_old_enough(worker)
synchronize { ns_worker_not_old_enough worker }
end
# @!visibility private
def worker_died(worker)
synchronize { ns_worker_died worker }
end
# @!visibility private
def worker_task_completed
synchronize { @completed_task_count += 1 }
end
private
# @!visibility private
def ns_initialize(opts)
@min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
@max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
@idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
@max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
@fallback_policy = opts.fetch(:fallback_policy, :abort)
raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)
raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length
self.auto_terminate = opts.fetch(:auto_terminate, true)
@pool = [] # all workers
@ready = [] # used as a stash (most idle worker is at the start)
@queue = [] # used as queue
# @ready or @queue is empty at all times
@scheduled_task_count = 0
@completed_task_count = 0
@largest_length = 0
@ruby_pid = $$ # detects if Ruby has forked
@gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
@next_gc_time = Concurrent.monotonic_time + @gc_interval
end
# @!visibility private
def ns_limited_queue?
@max_queue != 0
end
# @!visibility private
def ns_execute(*args, &task)
ns_reset_if_forked
if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
@scheduled_task_count += 1
else
handle_fallback(*args, &task)
end
ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
end
# @!visibility private
def ns_shutdown_execution
ns_reset_if_forked
if @pool.empty?
# nothing to do
stopped_event.set
end
if @queue.empty?
# no more tasks will be accepted, just stop all workers
@pool.each(&:stop)
end
end
# @!visibility private
def ns_kill_execution
# TODO log out unprocessed tasks in queue
# TODO try to shutdown first?
@pool.each(&:kill)
@pool.clear
@ready.clear
end
# tries to assign the task to a worker, taking one from @ready or creating a new one
# @return [true, false] if task is assigned to a worker
#
# @!visibility private
def ns_assign_worker(*args, &task)
# keep growing if the pool is not at the minimum yet
worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
if worker
worker << [task, args]
true
else
false
end
rescue ThreadError
# Raised when the operating system refuses to create the new thread
return false
end
# tries to enqueue task
# @return [true, false] if enqueued
#
# @!visibility private
def ns_enqueue(*args, &task)
if !ns_limited_queue? || @queue.size < @max_queue
@queue << [task, args]
true
else
false
end
end
# @!visibility private
def ns_worker_died(worker)
ns_remove_busy_worker worker
replacement_worker = ns_add_busy_worker
ns_ready_worker replacement_worker, false if replacement_worker
end
# creates a new worker which must receive work to do after it's added
# @return [nil, Worker] nil if max capacity is reached
#
# @!visibility private
def ns_add_busy_worker
return if @pool.size >= @max_length
@pool << (worker = Worker.new(self))
@largest_length = @pool.length if @pool.length > @largest_length
worker
end
# handles a ready worker, giving it a new job or returning it to @ready
#
# @!visibility private
def ns_ready_worker(worker, success = true)
task_and_args = @queue.shift
if task_and_args
worker << task_and_args
else
# stop workers when !running?, do not return them to @ready
if running?
@ready.push(worker)
else
worker.stop
end
end
end
# returns a worker that was not idle long enough back to @ready
#
# @!visibility private
def ns_worker_not_old_enough(worker)
# let's put workers coming from idle_test back to the start (as the oldest worker)
@ready.unshift(worker)
true
end
# removes a busy worker which is not tracked in @ready
#
# @!visibility private
def ns_remove_busy_worker(worker)
@pool.delete(worker)
stopped_event.set if @pool.empty? && !running?
true
end
# sends the oldest ready worker an idle test; if it has been idle long enough it is reclaimed, otherwise it is returned to the start
#
# @!visibility private
def ns_prune_pool
return if @pool.size <= @min_length
last_used = @ready.shift
last_used << :idle_test if last_used
@next_gc_time = Concurrent.monotonic_time + @gc_interval
end
def ns_reset_if_forked
if $$ != @ruby_pid
@queue.clear
@ready.clear
@pool.clear
@scheduled_task_count = 0
@completed_task_count = 0
@largest_length = 0
@ruby_pid = $$
end
end
# @!visibility private
class Worker
include Concern::Logging
def initialize(pool)
# instance variables accessed only under pool's lock so no need to sync here again
@queue = Queue.new
@pool = pool
@thread = create_worker @queue, pool, pool.idletime
end
def <<(message)
@queue << message
end
def stop
@queue << :stop
end
def kill
@thread.kill
end
private
def create_worker(queue, pool, idletime)
Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
last_message = Concurrent.monotonic_time
catch(:stop) do
loop do
case message = my_queue.pop
when :idle_test
if (Concurrent.monotonic_time - last_message) > my_idletime
my_pool.remove_busy_worker(self)
throw :stop
else
my_pool.worker_not_old_enough(self)
end
when :stop
my_pool.remove_busy_worker(self)
throw :stop
else
task, args = message
run_task my_pool, task, args
last_message = Concurrent.monotonic_time
my_pool.ready_worker(self)
end
end
end
end
end
def run_task(pool, task, args)
task.call(*args)
pool.worker_task_completed
rescue => ex
# let it fail
log DEBUG, ex
rescue Exception => ex
log ERROR, ex
pool.worker_died(self)
throw :stop
end
end
private_constant :Worker
end
end

View File

@ -0,0 +1,35 @@
require 'concurrent/synchronization'
module Concurrent
# A simple utility class that executes a callable and returns an array of three elements:
# success - indicating if the callable has been executed without errors
# value - filled by the callable result if it has been executed without errors, nil otherwise
# reason - the error raised by the callable if it has been executed with errors, nil otherwise
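#
# An illustrative sketch of the returned triple:
#
# ```ruby
# Concurrent::SafeTaskExecutor.new(-> { 42 }).execute
# #=> [true, 42, nil]
# Concurrent::SafeTaskExecutor.new(-> { raise 'boom' }).execute
# #=> [false, nil, #<RuntimeError: boom>]
# ```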
class SafeTaskExecutor < Synchronization::LockableObject
def initialize(task, opts = {})
@task = task
@exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
super() # ensures visibility
end
# @return [Array]
def execute(*args)
synchronize do
success = false
value = reason = nil
begin
value = @task.call(*args)
success = true
rescue @exception_class => ex
reason = ex
success = false
end
[success, value, reason]
end
end
end
end

View File

@ -0,0 +1,34 @@
require 'concurrent/executor/executor_service'
module Concurrent
# Indicates that the including `ExecutorService` guarantees
# that all operations will occur in the order they are posted and that no
# two operations may occur simultaneously. This module provides no
# functionality and provides no guarantees. That is the responsibility
# of the including class. This module exists solely to allow the including
# object to be interrogated for its serialization status.
#
# @example
# class Foo
# include Concurrent::SerialExecutor
# end
#
# foo = Foo.new
#
# foo.is_a? Concurrent::ExecutorService #=> true
# foo.is_a? Concurrent::SerialExecutor #=> true
# foo.serialized? #=> true
#
# @!visibility private
module SerialExecutorService
include ExecutorService
# @!macro executor_service_method_serialized_question
#
# @note Always returns `true`
def serialized?
true
end
end
end

View File

@ -0,0 +1,107 @@
require 'concurrent/errors'
require 'concurrent/concern/logging'
require 'concurrent/synchronization'
module Concurrent
# Ensures that posted jobs are executed serially, in the order received, and never run at the same time.
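#
# An illustrative sketch (`step_one`/`step_two` are hypothetical tasks; any
# `ExecutorService` could stand in for the pool):
#
# ```ruby
# serializer = Concurrent::SerializedExecution.new
# pool = Concurrent::ThreadPoolExecutor.new(max_threads: 4)
# serializer.post(pool) { step_one }
# serializer.post(pool) { step_two } # runs only after step_one finishes
# ```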
class SerializedExecution < Synchronization::LockableObject
include Concern::Logging
def initialize()
super()
synchronize { ns_initialize }
end
Job = Struct.new(:executor, :args, :block) do
def call
block.call(*args)
end
end
# Submit a task to the executor for asynchronous processing.
#
# @param [Executor] executor to be used for this job
#
# @param [Array] args zero or more arguments to be passed to the task
#
# @yield the asynchronous task to perform
#
# @return [Boolean] `true` if the task is queued, `false` if the executor
# is not running
#
# @raise [ArgumentError] if no task is given
def post(executor, *args, &task)
posts [[executor, args, task]]
true
end
# Like {#post} but allows multiple tasks to be submitted at once; it is guaranteed that they will
# not be interleaved by other tasks.
#
# @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
# first is a {ExecutorService}, second is array of args for task, third is a task (Proc)
def posts(posts)
# if can_overflow?
# raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
# end
return nil if posts.empty?
jobs = posts.map { |executor, args, task| Job.new executor, args, task }
job_to_post = synchronize do
if @being_executed
@stash.push(*jobs)
nil
else
@being_executed = true
@stash.push(*jobs[1..-1])
jobs.first
end
end
call_job job_to_post if job_to_post
true
end
private
def ns_initialize
@being_executed = false
@stash = []
end
def call_job(job)
did_it_run = begin
job.executor.post { work(job) }
true
rescue RejectedExecutionError => ex
false
end
# TODO not the best idea to run it myself
unless did_it_run
begin
work job
rescue => ex
# let it fail
log DEBUG, ex
end
end
end
# ensures next job is executed if any is stashed
def work(job)
job.call
ensure
synchronize do
job = @stash.shift || (@being_executed = false)
end
# TODO maybe be able to tell caching pool to just enqueue this job, because the current one ends at the end
# of this block
call_job job if job
end
end
end

View File

@ -0,0 +1,28 @@
require 'delegate'
require 'concurrent/executor/serial_executor_service'
require 'concurrent/executor/serialized_execution'
module Concurrent
# A wrapper/delegator for any `ExecutorService` that
# guarantees serialized execution of tasks.
#
# @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html)
# @see Concurrent::SerializedExecution
class SerializedExecutionDelegator < SimpleDelegator
include SerialExecutorService
def initialize(executor)
@executor = executor
@serializer = SerializedExecution.new
super(executor)
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return false unless running?
@serializer.post(@executor, *args, &task)
end
end
end

View File

@ -0,0 +1,100 @@
require 'concurrent/atomics'
require 'concurrent/executor/executor_service'
module Concurrent
# An executor service in which every operation spawns a new,
# independently operating thread.
#
# This is perhaps the most inefficient executor service in this
# library. It exists mainly for testing and debugging. Thread creation
# and management is expensive in Ruby and this executor performs no
# resource pooling. This can be very beneficial during testing and
# debugging because it decouples the using code from the underlying
# executor implementation. In production this executor will likely
# lead to suboptimal performance.
#
# @note Intended for use primarily in testing and debugging.
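#
# A minimal sketch; every task gets its own thread:
#
# ```ruby
# executor = Concurrent::SimpleExecutorService.new
# executor.post('hello') { |msg| puts "#{msg} from #{Thread.current}" }
# executor.shutdown
# executor.wait_for_termination
# ```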
class SimpleExecutorService < RubyExecutorService
# @!macro executor_service_method_post
def self.post(*args)
raise ArgumentError.new('no block given') unless block_given?
Thread.new(*args) do
Thread.current.abort_on_exception = false
yield(*args)
end
true
end
# @!macro executor_service_method_left_shift
def self.<<(task)
post(&task)
self
end
# @!macro executor_service_method_post
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return false unless running?
@count.increment
Thread.new(*args) do
Thread.current.abort_on_exception = false
begin
yield(*args)
ensure
@count.decrement
@stopped.set if @running.false? && @count.value == 0
end
end
end
# @!macro executor_service_method_left_shift
def <<(task)
post(&task)
self
end
# @!macro executor_service_method_running_question
def running?
@running.true?
end
# @!macro executor_service_method_shuttingdown_question
def shuttingdown?
@running.false? && ! @stopped.set?
end
# @!macro executor_service_method_shutdown_question
def shutdown?
@stopped.set?
end
# @!macro executor_service_method_shutdown
def shutdown
@running.make_false
@stopped.set if @count.value == 0
true
end
# @!macro executor_service_method_kill
def kill
@running.make_false
@stopped.set
true
end
# @!macro executor_service_method_wait_for_termination
def wait_for_termination(timeout = nil)
@stopped.wait(timeout)
end
private
def ns_initialize
@running = Concurrent::AtomicBoolean.new(true)
@stopped = Concurrent::Event.new
@count = Concurrent::AtomicFixnum.new(0)
end
end
end

View File

@ -0,0 +1,56 @@
require 'concurrent/executor/ruby_single_thread_executor'
module Concurrent
if Concurrent.on_jruby?
require 'concurrent/executor/java_single_thread_executor'
end
SingleThreadExecutorImplementation = case
when Concurrent.on_jruby?
JavaSingleThreadExecutor
else
RubySingleThreadExecutor
end
private_constant :SingleThreadExecutorImplementation
# @!macro single_thread_executor
#
# A thread pool with a single thread and an unlimited queue. Should the thread
# die for any reason it will be removed and replaced, thus ensuring that
# the executor will always remain viable and available to process jobs.
#
# A common pattern for background processing is to create a single thread
# on which an infinite loop is run. The thread's loop blocks on an input
# source (perhaps blocking I/O or a queue) and processes each input as it
# is received. This pattern has several issues. The thread itself is highly
# susceptible to errors during processing. Also, the thread itself must be
# constantly monitored and restarted should it die. `SingleThreadExecutor`
# encapsulates all these behaviors. The task processor is highly resilient
# to errors from within tasks. Also, should the thread die it will
# automatically be restarted.
#
# The API and behavior of this class are based on Java's `SingleThreadExecutor`.
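#
# An illustrative sketch (tasks run one at a time, in submission order):
#
# ```ruby
# executor = Concurrent::SingleThreadExecutor.new
# executor.post { puts 'first' }
# executor.post { puts 'second' } # runs only after the first completes
# ```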
#
# @!macro abstract_executor_service_public_api
class SingleThreadExecutor < SingleThreadExecutorImplementation
# @!macro single_thread_executor_method_initialize
#
# Create a new thread pool.
#
# @option opts [Symbol] :fallback_policy (:discard) the policy for handling new
# tasks that are received when the queue size has reached
# `max_queue` or the executor has shut down
#
# @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
# in `FALLBACK_POLICIES`
#
# @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
# @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
# @!method initialize(opts = {})
# @!macro single_thread_executor_method_initialize
end
end

View File

@ -0,0 +1,87 @@
require 'concurrent/utility/engine'
require 'concurrent/executor/ruby_thread_pool_executor'
module Concurrent
if Concurrent.on_jruby?
require 'concurrent/executor/java_thread_pool_executor'
end
ThreadPoolExecutorImplementation = case
when Concurrent.on_jruby?
JavaThreadPoolExecutor
else
RubyThreadPoolExecutor
end
private_constant :ThreadPoolExecutorImplementation
# @!macro thread_pool_executor
#
# An abstraction composed of one or more threads and a task queue. Tasks
# (blocks or `proc` objects) are submitted to the pool and added to the queue.
# The threads in the pool remove the tasks and execute them in the order
# they were received.
#
# A `ThreadPoolExecutor` will automatically adjust the pool size according
# to the bounds set by `min-threads` and `max-threads`. When a new task is
# submitted and fewer than `min-threads` threads are running, a new thread
# is created to handle the request, even if other worker threads are idle.
# If there are more than `min-threads` but fewer than `max-threads` threads
# running, a new thread will be created only if the queue is full.
#
# Threads that are idle for too long will be garbage collected, down to the
# configured minimum. Should a thread crash, it, too, will be garbage collected.
#
# `ThreadPoolExecutor` is based on the Java class of the same name. From
# the official Java documentation;
#
# > Thread pools address two different problems: they usually provide
# > improved performance when executing large numbers of asynchronous tasks,
# > due to reduced per-task invocation overhead, and they provide a means
# > of bounding and managing the resources, including threads, consumed
# > when executing a collection of tasks. Each ThreadPoolExecutor also
# > maintains some basic statistics, such as the number of completed tasks.
# >
# > To be useful across a wide range of contexts, this class provides many
# > adjustable parameters and extensibility hooks. However, programmers are
# > urged to use the more convenient Executors factory methods
# > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation),
# > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single
# > background thread), that preconfigure settings for the most common usage
# > scenarios.
#
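# A hedged construction sketch (`heavy_work` is a placeholder; the bounds
# are illustrative, not defaults):
#
# ```ruby
# pool = Concurrent::ThreadPoolExecutor.new(
#   min_threads: 1,
#   max_threads: 5,
#   max_queue:   20,
#   fallback_policy: :abort
# )
# pool.post { heavy_work }
# ```
#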
# @!macro thread_pool_options
#
# @!macro thread_pool_executor_public_api
class ThreadPoolExecutor < ThreadPoolExecutorImplementation
# @!macro thread_pool_executor_method_initialize
#
# Create a new thread pool.
#
# @param [Hash] opts the options which configure the thread pool.
#
# @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum
# number of threads to be created
# @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted
# and fewer than `min_threads` are running, a new thread is created
# @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum
# number of seconds a thread may be idle before being reclaimed
# @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum
# number of tasks allowed in the work queue at any one time; a value of
# zero means the queue may grow without bound
# @option opts [Symbol] :fallback_policy (:abort) the policy for handling new
# tasks that are received when the queue size has reached
# `max_queue` or the executor has shut down
#
# @raise [ArgumentError] if `:max_threads` is less than one
# @raise [ArgumentError] if `:min_threads` is less than zero
# @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
# in `FALLBACK_POLICIES`
#
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
# @!method initialize(opts = {})
# @!macro thread_pool_executor_method_initialize
end
end

View File

@ -0,0 +1,175 @@
require 'concurrent/scheduled_task'
require 'concurrent/atomic/event'
require 'concurrent/collection/non_concurrent_priority_queue'
require 'concurrent/executor/executor_service'
require 'concurrent/executor/single_thread_executor'
require 'concurrent/options'
module Concurrent
# Executes a collection of tasks, each after a given delay. A master task
# monitors the set and schedules each task for execution at the appropriate
# time. Tasks are run on the global thread pool or on the supplied executor.
# Each task is represented as a `ScheduledTask`.
#
# @see Concurrent::ScheduledTask
#
# @!macro monotonic_clock_warning
class TimerSet < RubyExecutorService
# Create a new set of timed tasks.
#
# @!macro executor_options
#
# @param [Hash] opts the options used to specify the executor on which to perform actions
# @option opts [Executor] :executor when set use the given `Executor` instance.
# Three special values are also supported: `:task` returns the global task pool,
# `:operation` returns the global operation pool, and `:immediate` returns a new
# `ImmediateExecutor` object.
def initialize(opts = {})
super(opts)
end
# Post a task to be executed after a given delay (in seconds). If the
# delay is less than 1/100th of a second the task will be posted immediately
# to the executor.
#
# @param [Float] delay the number of seconds to wait for before executing the task.
# @param [Array<Object>] args the arguments passed to the task on execution.
#
# @yield the task to be performed.
#
# @return [Concurrent::ScheduledTask, false] IVar representing the task if the post
# is successful; false after shutdown.
#
# @raise [ArgumentError] if the intended execution time is not in the future.
# @raise [ArgumentError] if no block is given.
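#
# @example An illustrative sketch
#   timer_set = Concurrent::TimerSet.new
#   task = timer_set.post(0.5) { puts 'ran about half a second later' }
#   task #=> a Concurrent::ScheduledTask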
def post(delay, *args, &task)
raise ArgumentError.new('no block given') unless block_given?
return false unless running?
opts = {
executor: @task_executor,
args: args,
timer_set: self
}
task = ScheduledTask.execute(delay, opts, &task) # may raise exception
task.unscheduled? ? false : task
end
# Begin an immediate shutdown. In-progress tasks will be allowed to
# complete but enqueued tasks will be dismissed and no new tasks
# will be accepted. Has no additional effect if the thread pool is
# not running.
def kill
shutdown
end
private :<<
private
# Initialize the object.
#
# @param [Hash] opts the options to create the object with.
# @!visibility private
def ns_initialize(opts)
@queue = Collection::NonConcurrentPriorityQueue.new(order: :min)
@task_executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
@timer_executor = SingleThreadExecutor.new
@condition = Event.new
@ruby_pid = $$ # detects if Ruby has forked
self.auto_terminate = opts.fetch(:auto_terminate, true)
end
# Post the task to the internal queue.
#
# @note This is intended as a callback method from ScheduledTask
# only. It is not intended to be used directly. Post a task
# by using the `ScheduledTask#execute` method.
#
# @!visibility private
def post_task(task)
synchronize{ ns_post_task(task) }
end
# @!visibility private
def ns_post_task(task)
return false unless ns_running?
ns_reset_if_forked
if (task.initial_delay) <= 0.01
task.executor.post{ task.process_task }
else
@queue.push(task)
# only post the process method when the queue is empty
@timer_executor.post(&method(:process_tasks)) if @queue.length == 1
@condition.set
end
true
end
# Remove the given task from the queue.
#
# @note This is intended as a callback method from `ScheduledTask`
# only. It is not intended to be used directly. Cancel a task
# by using the `ScheduledTask#cancel` method.
#
# @!visibility private
def remove_task(task)
synchronize{ @queue.delete(task) }
end
# `ExecutorService` callback called during shutdown.
#
# @!visibility private
def ns_shutdown_execution
ns_reset_if_forked
@queue.clear
@timer_executor.kill
stopped_event.set
end
def ns_reset_if_forked
if $$ != @ruby_pid
@queue.clear
@condition.reset
@ruby_pid = $$
end
end
# Run a loop and execute tasks in the scheduled order and at the approximate
# scheduled time. If no tasks remain the thread will exit gracefully so that
# garbage collection can occur. If there are no ready tasks it will sleep
# for up to 60 seconds waiting for the next scheduled task.
#
# @!visibility private
def process_tasks
loop do
task = synchronize { @condition.reset; @queue.peek }
break unless task
now = Concurrent.monotonic_time
diff = task.schedule_time - now
if diff <= 0
# We need to remove the task from the queue before passing
# it to the executor, to avoid race conditions where we pass
# the peek'ed task to the executor and then pop a different
# one that's been added in the meantime.
#
# Note that there's no race condition between the peek and
# this pop - this pop could retrieve a different task from
# the peek, but that task would be due to fire now anyway
# (because @queue is a priority queue, and this thread is
# the only reader, so whatever timer is at the head of the
# queue now must have the same pop time, or a closer one, as
# when we peeked).
task = synchronize { @queue.pop }
task.executor.post{ task.process_task }
else
@condition.wait([diff, 60].min)
end
end
end
end
end

View File

@ -0,0 +1,20 @@
require 'concurrent/executor/abstract_executor_service'
require 'concurrent/executor/cached_thread_pool'
require 'concurrent/executor/executor_service'
require 'concurrent/executor/fixed_thread_pool'
require 'concurrent/executor/immediate_executor'
require 'concurrent/executor/indirect_immediate_executor'
require 'concurrent/executor/java_executor_service'
require 'concurrent/executor/java_single_thread_executor'
require 'concurrent/executor/java_thread_pool_executor'
require 'concurrent/executor/ruby_executor_service'
require 'concurrent/executor/ruby_single_thread_executor'
require 'concurrent/executor/ruby_thread_pool_executor'
require 'concurrent/executor/cached_thread_pool'
require 'concurrent/executor/safe_task_executor'
require 'concurrent/executor/serial_executor_service'
require 'concurrent/executor/serialized_execution'
require 'concurrent/executor/serialized_execution_delegator'
require 'concurrent/executor/single_thread_executor'
require 'concurrent/executor/thread_pool_executor'
require 'concurrent/executor/timer_set'

View File

@ -0,0 +1,141 @@
require 'thread'
require 'concurrent/constants'
require 'concurrent/errors'
require 'concurrent/ivar'
require 'concurrent/executor/safe_task_executor'
require 'concurrent/options'
# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc.
module Concurrent
# {include:file:docs-source/future.md}
#
# @!macro copy_options
#
# @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module
# @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function
# @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future
class Future < IVar
# Create a new `Future` in the `:unscheduled` state.
#
# @yield the asynchronous operation to perform
#
# @!macro executor_and_deref_options
#
# @option opts [object, Array] :args zero or more arguments to be passed to the task
# block on execution
#
# @raise [ArgumentError] if no block is given
def initialize(opts = {}, &block)
raise ArgumentError.new('no block given') unless block_given?
super(NULL, opts.merge(__task_from_block__: block), &nil)
end
# Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and
# passes the block to a new thread/thread pool for eventual execution.
# Does nothing if the `Future` is in any state other than `:unscheduled`.
#
# @return [Future] a reference to `self`
#
# @example Instance and execute in separate steps
# future = Concurrent::Future.new{ sleep(1); 42 }
# future.state #=> :unscheduled
# future.execute
# future.state #=> :pending
#
# @example Instance and execute in one line
# future = Concurrent::Future.new{ sleep(1); 42 }.execute
# future.state #=> :pending
def execute
if compare_and_set_state(:pending, :unscheduled)
@executor.post{ safe_execute(@task, @args) }
self
end
end
# Create a new `Future` object with the given block, execute it, and return the
# `:pending` object.
#
# @yield the asynchronous operation to perform
#
# @!macro executor_and_deref_options
#
# @option opts [object, Array] :args zero or more arguments to be passed to the task
# block on execution
#
# @raise [ArgumentError] if no block is given
#
# @return [Future] the newly created `Future` in the `:pending` state
#
# @example
# future = Concurrent::Future.execute{ sleep(1); 42 }
# future.state #=> :pending
def self.execute(opts = {}, &block)
Future.new(opts, &block).execute
end
# @!macro ivar_set_method
def set(value = NULL, &block)
check_for_block_or_value!(block_given?, value)
synchronize do
if @state != :unscheduled
raise MultipleAssignmentError
else
@task = block || Proc.new { value }
end
end
execute
end
# Attempt to cancel the operation if it has not already processed.
# The operation can only be cancelled while still `pending`. It cannot
# be cancelled once it has begun processing or has completed.
#
# @return [Boolean] was the operation successfully cancelled.
def cancel
if compare_and_set_state(:cancelled, :pending)
complete(false, nil, CancelledOperationError.new)
true
else
false
end
end
# Has the operation been successfully cancelled?
#
# @return [Boolean]
def cancelled?
state == :cancelled
end
# Wait the given number of seconds for the operation to complete.
# On timeout attempt to cancel the operation.
#
# @param [Numeric] timeout the maximum time in seconds to wait.
# @return [Boolean] true if the operation completed before the timeout
# else false
def wait_or_cancel(timeout)
wait(timeout)
if complete?
true
else
cancel
false
end
end
protected
def ns_initialize(value, opts)
super
@state = :unscheduled
@task = opts[:__task_from_block__]
@executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
@args = get_arguments_from(opts)
end
end
end

View File

@ -0,0 +1,59 @@
require 'concurrent/utility/engine'
require 'concurrent/thread_safe/util'
module Concurrent
# @!macro concurrent_hash
#
# A thread-safe subclass of Hash. This version locks against the object
# itself for every method call, ensuring only one thread can be reading
# or writing at a time. This includes iteration methods like `#each`,
# which take the lock repeatedly when reading each item.
#
# @see http://ruby-doc.org/core-2.2.0/Hash.html Ruby standard library `Hash`
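#
# @example An illustrative sketch
#   hash = Concurrent::Hash.new
#   hash[:key] = 'value' # safe to call from multiple threads
#   hash[:key]           #=> "value"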
# @!macro internal_implementation_note
HashImplementation = case
when Concurrent.on_cruby?
# Because MRI never runs code in parallel, the existing
# non-thread-safe structures should usually work fine.
::Hash
when Concurrent.on_jruby?
require 'jruby/synchronized'
class JRubyHash < ::Hash
include JRuby::Synchronized
end
JRubyHash
when Concurrent.on_rbx?
require 'monitor'
require 'concurrent/thread_safe/util/data_structures'
class RbxHash < ::Hash
end
ThreadSafe::Util.make_synchronized_on_rbx RbxHash
RbxHash
when Concurrent.on_truffleruby?
require 'concurrent/thread_safe/util/data_structures'
class TruffleRubyHash < ::Hash
end
ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash
TruffleRubyHash
else
warn 'Possibly unsupported Ruby implementation'
::Hash
end
private_constant :HashImplementation
# @!macro concurrent_hash
class Hash < HashImplementation
end
end

View File

@ -0,0 +1,93 @@
require 'concurrent/synchronization/abstract_struct'
require 'concurrent/synchronization'
module Concurrent
# A thread-safe, immutable variation of Ruby's standard `Struct`.
#
# @see http://ruby-doc.org/core-2.2.0/Struct.html Ruby standard library `Struct`
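#
# @example An illustrative sketch
#   Point = Concurrent::ImmutableStruct.new(:x, :y)
#   point = Point.new(1, 2)
#   point.x    #=> 1
#   point.to_h #=> {:x=>1, :y=>2}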
module ImmutableStruct
include Synchronization::AbstractStruct
def self.included(base)
base.safe_initialization!
end
# @!macro struct_values
def values
ns_values
end
alias_method :to_a, :values
# @!macro struct_values_at
def values_at(*indexes)
ns_values_at(indexes)
end
# @!macro struct_inspect
def inspect
ns_inspect
end
alias_method :to_s, :inspect
# @!macro struct_merge
def merge(other, &block)
ns_merge(other, &block)
end
# @!macro struct_to_h
def to_h
ns_to_h
end
# @!macro struct_get
def [](member)
ns_get(member)
end
# @!macro struct_equality
def ==(other)
ns_equality(other)
end
# @!macro struct_each
def each(&block)
return enum_for(:each) unless block_given?
ns_each(&block)
end
# @!macro struct_each_pair
def each_pair(&block)
return enum_for(:each_pair) unless block_given?
ns_each_pair(&block)
end
# @!macro struct_select
def select(&block)
return enum_for(:select) unless block_given?
ns_select(&block)
end
# @!macro struct_new
def self.new(*args, &block)
clazz_name = nil
if args.length == 0
raise ArgumentError.new('wrong number of arguments (0 for 1+)')
elsif args.length > 0 && args.first.is_a?(String)
clazz_name = args.shift
end
FACTORY.define_struct(clazz_name, args, &block)
end
FACTORY = Class.new(Synchronization::LockableObject) do
def define_struct(name, members, &block)
synchronize do
Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block)
end
end
end.new
private_constant :FACTORY
end
end

View File

@ -0,0 +1,207 @@
require 'concurrent/constants'
require 'concurrent/errors'
require 'concurrent/collection/copy_on_write_observer_set'
require 'concurrent/concern/obligation'
require 'concurrent/concern/observable'
require 'concurrent/synchronization'
module Concurrent
# An `IVar` is like a future that you can assign. As a future is a value that
# is being computed that you can wait on, an `IVar` is a value that is waiting
# to be assigned, that you can wait on. `IVars` are single assignment and
# deterministic.
#
# Then, express futures as an asynchronous computation that assigns an `IVar`.
# The `IVar` becomes the primitive on which [futures](Future) and
# [dataflow](Dataflow) are built.
#
# An `IVar` is a single-element container that is normally created empty, and
# can only be set once. The I in `IVar` stands for immutable. Reading an
# `IVar` normally blocks until it is set. It is safe to set and read an `IVar`
# from different threads.
#
# If you want to have some parallel task set the value in an `IVar`, you want
# a `Future`. If you want to create a graph of parallel tasks all executed
# when the values they depend on are ready you want `dataflow`. `IVar` is
# generally a low-level primitive.
#
# ## Examples
#
# Create, set and get an `IVar`
#
# ```ruby
# ivar = Concurrent::IVar.new
# ivar.set 14
# ivar.value #=> 14
# ivar.set 2 # would now be an error
# ```
#
# ## See Also
#
# 1. For the theory: Arvind, R. Nikhil, and K. Pingali.
# [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562).
# In Proceedings of Workshop on Graph Reduction, 1986.
# 2. For recent application:
# [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html).
class IVar < Synchronization::LockableObject
include Concern::Obligation
include Concern::Observable
# Create a new `IVar` in the `:pending` state with the (optional) initial value.
#
# @param [Object] value the initial value
# @param [Hash] opts the options to create a message with
# @option opts [String] :dup_on_deref (false) call `#dup` before returning
# the data
# @option opts [String] :freeze_on_deref (false) call `#freeze` before
# returning the data
# @option opts [String] :copy_on_deref (nil) call the given `Proc` passing
# the internal value and returning the value returned from the proc
def initialize(value = NULL, opts = {}, &block)
if value != NULL && block_given?
raise ArgumentError.new('provide only a value or a block')
end
super(&nil)
synchronize { ns_initialize(value, opts, &block) }
end
# Add an observer on this object that will receive notification on update.
#
# Upon completion the `IVar` will notify all observers in a thread-safe way.
# The `func` method of the observer will be called with three arguments: the
# `Time` at which the `Future` completed the asynchronous operation, the
# final `value` (or `nil` on rejection), and the final `reason` (or `nil` on
# fulfillment).
#
# @param [Object] observer the object that will be notified of changes
# @param [Symbol] func symbol naming the method to call when this
# `Observable` has changes`
def add_observer(observer = nil, func = :update, &block)
raise ArgumentError.new('cannot provide both an observer and a block') if observer && block
direct_notification = false
if block
observer = block
func = :call
end
synchronize do
if event.set?
direct_notification = true
else
observers.add_observer(observer, func)
end
end
observer.send(func, Time.now, self.value, reason) if direct_notification
observer
end
# @!macro ivar_set_method
# Set the `IVar` to a value and wake or notify all threads waiting on it.
#
# @!macro ivar_set_parameters_and_exceptions
# @param [Object] value the value to store in the `IVar`
# @yield A block operation to use for setting the value
# @raise [ArgumentError] if both a value and a block are given
# @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already
# been set or otherwise completed
#
# @return [IVar] self
def set(value = NULL)
check_for_block_or_value!(block_given?, value)
raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending)
begin
value = yield if block_given?
complete_without_notification(true, value, nil)
rescue => ex
complete_without_notification(false, nil, ex)
end
notify_observers(self.value, reason)
self
end
# @!macro ivar_fail_method
# Set the `IVar` to failed due to some error and wake or notify all threads waiting on it.
#
# @param [Object] reason for the failure
# @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already
# been set or otherwise completed
# @return [IVar] self
def fail(reason = StandardError.new)
complete(false, nil, reason)
end
# Attempt to set the `IVar` with the given value or block. Return a
# boolean indicating the success or failure of the set operation.
#
# @!macro ivar_set_parameters_and_exceptions
#
# @return [Boolean] true if the value was set else false
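#
# @example A sketch of first-write-wins behavior (values illustrative)
#   ivar = Concurrent::IVar.new
#   ivar.try_set(1) #=> true, the IVar is now set to 1
#   ivar.try_set(2) #=> false, the value remains 1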
def try_set(value = NULL, &block)
set(value, &block)
true
rescue MultipleAssignmentError
false
end
protected
# @!visibility private
def ns_initialize(value, opts)
value = yield if block_given?
init_obligation
self.observers = Collection::CopyOnWriteObserverSet.new
set_deref_options(opts)
@state = :pending
if value != NULL
ns_complete_without_notification(true, value, nil)
end
end
# @!visibility private
def safe_execute(task, args = [])
if compare_and_set_state(:processing, :pending)
success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args)
complete(success, val, reason)
yield(success, val, reason) if block_given?
end
end
# @!visibility private
def complete(success, value, reason)
complete_without_notification(success, value, reason)
notify_observers(self.value, reason)
self
end
# @!visibility private
def complete_without_notification(success, value, reason)
synchronize { ns_complete_without_notification(success, value, reason) }
self
end
# @!visibility private
def notify_observers(value, reason)
observers.notify_and_delete_observers{ [Time.now, value, reason] }
end
# @!visibility private
def ns_complete_without_notification(success, value, reason)
raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state
set_state(success, value, reason)
event.set
end
# @!visibility private
def check_for_block_or_value!(block_given, value) # :nodoc:
if (block_given && value != NULL) || (! block_given && value == NULL)
raise ArgumentError.new('must set with either a value or a block')
end
end
end
end

View File

@ -0,0 +1,337 @@
require 'thread'
require 'concurrent/constants'
require 'concurrent/synchronization'
require 'concurrent/utility/engine'
module Concurrent
# @!visibility private
module Collection
# @!visibility private
MapImplementation = case
when Concurrent.on_jruby?
# noinspection RubyResolve
JRubyMapBackend
when Concurrent.on_cruby?
require 'concurrent/collection/map/mri_map_backend'
MriMapBackend
when Concurrent.on_rbx? || Concurrent.on_truffleruby?
require 'concurrent/collection/map/atomic_reference_map_backend'
AtomicReferenceMapBackend
else
warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation'
require 'concurrent/collection/map/synchronized_map_backend'
SynchronizedMapBackend
end
end
# `Concurrent::Map` is a hash-like object and should have much better performance
# characteristics, especially under high concurrency, than `Concurrent::Hash`.
# However, `Concurrent::Map` is not strictly semantically equivalent to a ruby `Hash`
# -- for instance, it does not necessarily retain ordering by insertion time as `Hash`
# does. For most uses it should do fine though, and we recommend you consider
# `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs.
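#
# A minimal sketch of atomic first-write usage (the key names are illustrative):
#
# ```ruby
# require 'concurrent'
#
# counters = Concurrent::Map.new
# counters.put_if_absent(:requests, 0) #=> nil, stored because the key was absent
# counters.put_if_absent(:requests, 5) #=> 0, the existing value is returned
# counters[:requests]                  #=> 0
# ```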
class Map < Collection::MapImplementation
# @!macro map.atomic_method
# This method is atomic.
# @!macro map.atomic_method_with_block
# This method is atomic.
# @note Atomic methods taking a block do not allow the `self` instance
# to be used within the block. Doing so will cause a deadlock.
# @!method compute_if_absent(key)
# Compute and store new value for key if the key is absent.
# @param [Object] key
# @yield new value
# @yieldreturn [Object] new value
# @return [Object] new value or current value
# @!macro map.atomic_method_with_block
# @!method compute_if_present(key)
# Compute and store new value for key if the key is present.
# @param [Object] key
# @yield new value
# @yieldparam old_value [Object]
# @yieldreturn [Object, nil] new value, when nil the key is removed
# @return [Object, nil] new value or nil
# @!macro map.atomic_method_with_block
# @!method compute(key)
# Compute and store new value for key.
# @param [Object] key
# @yield compute new value from old one
# @yieldparam old_value [Object, nil] old_value, or nil when key is absent
# @yieldreturn [Object, nil] new value, when nil the key is removed
# @return [Object, nil] new value or nil
# @!macro map.atomic_method_with_block
# @!method merge_pair(key, value)
# If the key is absent, the value is stored, otherwise new value is
# computed with a block.
# @param [Object] key
# @param [Object] value
# @yield compute new value from old one
# @yieldparam old_value [Object] old value
# @yieldreturn [Object, nil] new value, when nil the key is removed
# @return [Object, nil] new value or nil
# @!macro map.atomic_method_with_block
# @!method replace_pair(key, old_value, new_value)
# Replaces old_value with new_value if key exists and current value
# matches old_value
# @param [Object] key
# @param [Object] old_value
# @param [Object] new_value
# @return [true, false] true if replaced
# @!macro map.atomic_method
# @!method replace_if_exists(key, new_value)
# Replaces current value with new_value if key exists
# @param [Object] key
# @param [Object] new_value
# @return [Object, nil] old value or nil
# @!macro map.atomic_method
# @!method get_and_set(key, value)
# Get the current value under key and set new value.
# @param [Object] key
# @param [Object] value
# @return [Object, nil] old value or nil when the key was absent
# @!macro map.atomic_method
# @!method delete(key)
# Delete key and its value.
# @param [Object] key
# @return [Object, nil] old value or nil when the key was absent
# @!macro map.atomic_method
# @!method delete_pair(key, value)
# Delete pair and its value if current value equals the provided value.
# @param [Object] key
# @param [Object] value
# @return [true, false] true if deleted
# @!macro map.atomic_method
def initialize(options = nil, &block)
if options.kind_of?(::Hash)
validate_options_hash!(options)
else
options = nil
end
super(options)
@default_proc = block
end
# Get a value with key
# @param [Object] key
# @return [Object] the value
def [](key)
if value = super # non-falsy value is an existing mapping, return it right away
value
# The re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in
# order to avoid a race condition: by the time the current thread got to the key?(key)
# call, a key => value mapping might already have been created by a different thread
# (key?(key) would then return true, this elsif branch wouldn't be taken, and an
# incorrect +nil+ value would be returned).
# note: the nil == value check is not technically necessary
elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL))
@default_proc.call(self, key)
else
value
end
end
alias_method :get, :[]
# TODO (pitr-ch 30-Oct-2018): doc
alias_method :put, :[]=
# Get a value with key, or default_value when key is absent,
# or fail when no default_value is given.
# @param [Object] key
# @param [Object] default_value
# @yield default value for a key
# @yieldparam key [Object]
# @yieldreturn [Object] default value
# @return [Object] the value or default value
# @raise [KeyError] when key is missing and no default_value is provided
# @!macro map_method_not_atomic
# @note The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended
# to be use as a concurrency primitive with strong happens-before
# guarantees. It is not intended to be used as a high-level abstraction
# supporting complex operations. All read and write operations are
# thread safe, but no guarantees are made regarding race conditions
# between the fetch operation and yielding to the block. Additionally,
# this method does not support recursion. This is due to internal
# constraints that are very unlikely to change in the near future.
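# @example A short sketch (keys illustrative)
#   map = Concurrent::Map.new
#   map[:a] = 1
#   map.fetch(:a, 0)        #=> 1
#   map.fetch(:b, 0)        #=> 0
#   map.fetch(:b) { |k| k } #=> :b
#   map.fetch(:b)           # raises KeyError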
def fetch(key, default_value = NULL)
if NULL != (value = get_or_default(key, NULL))
value
elsif block_given?
yield key
elsif NULL != default_value
default_value
else
raise_fetch_no_key
end
end
# Fetch value with key, or store default value when key is absent,
# or fail when no default_value is given. This is a two step operation,
# therefore not atomic. The store can overwrite other concurrently
# stored value.
# @param [Object] key
# @param [Object] default_value
# @yield default value for a key
# @yieldparam key [Object]
# @yieldreturn [Object] default value
# @return [Object] the value or default value
# @!macro map.atomic_method_with_block
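# @example A short sketch (key illustrative)
#   map = Concurrent::Map.new
#   map.fetch_or_store(:a) { 1 } #=> 1, computed and stored
#   map.fetch_or_store(:a, 2)    #=> 1, the existing value wins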
def fetch_or_store(key, default_value = NULL)
fetch(key) do
put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value))
end
end
# Insert value into map with key if key is absent in one atomic step.
# @param [Object] key
# @param [Object] value
# @return [Object, nil] the value or nil when key was present
def put_if_absent(key, value)
computed = false
result = compute_if_absent(key) do
computed = true
value
end
computed ? nil : result
end unless method_defined?(:put_if_absent)
# Is the value stored in the map? Iterates over all values.
# @param [Object] value
# @return [true, false]
def value?(value)
each_value do |v|
return true if value.equal?(v)
end
false
end
# All keys
# @return [::Array<Object>] keys
def keys
arr = []
each_pair { |k, v| arr << k }
arr
end unless method_defined?(:keys)
# All values
# @return [::Array<Object>] values
def values
arr = []
each_pair { |k, v| arr << v }
arr
end unless method_defined?(:values)
# Iterates over each key.
# @yield for each key in the map
# @yieldparam key [Object]
# @return [self]
# @!macro map.atomic_method_with_block
def each_key
each_pair { |k, v| yield k }
end unless method_defined?(:each_key)
# Iterates over each value.
# @yield for each value in the map
# @yieldparam value [Object]
# @return [self]
# @!macro map.atomic_method_with_block
def each_value
each_pair { |k, v| yield v }
end unless method_defined?(:each_value)
# Iterates over each key value pair.
# @yield for each key value pair in the map
# @yieldparam key [Object]
# @yieldparam value [Object]
# @return [self]
# @!macro map.atomic_method_with_block
def each_pair
return enum_for :each_pair unless block_given?
super
end
alias_method :each, :each_pair unless method_defined?(:each)
# Find key of a value.
# @param [Object] value
# @return [Object, nil] key or nil when not found
def key(value)
each_pair { |k, v| return k if v == value }
nil
end unless method_defined?(:key)
alias_method :index, :key if RUBY_VERSION < '1.9'
# Is map empty?
# @return [true, false]
def empty?
each_pair { |k, v| return false }
true
end unless method_defined?(:empty?)
# The size of map.
# @return [Integer] size
def size
count = 0
each_pair { |k, v| count += 1 }
count
end unless method_defined?(:size)
# @!visibility private
def marshal_dump
raise TypeError, "can't dump hash with default proc" if @default_proc
h = {}
each_pair { |k, v| h[k] = v }
h
end
# @!visibility private
def marshal_load(hash)
initialize
populate_from(hash)
end
undef :freeze
# @!visibility private
def inspect
format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect
end
private
def raise_fetch_no_key
raise KeyError, 'key not found'
end
def initialize_copy(other)
super
populate_from(other)
end
def populate_from(hash)
hash.each_pair { |k, v| self[k] = v }
self
end
def validate_options_hash!(options)
if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0)
raise ArgumentError, ":initial_capacity must be a positive Integer"
end
if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1)
raise ArgumentError, ":load_factor must be a number between 0 and 1"
end
end
end
end

View File

@ -0,0 +1,229 @@
require 'concurrent/synchronization'
module Concurrent
# A `Maybe` encapsulates an optional value. A `Maybe` either contains a value
# (represented as `Just`) or it is empty (represented as `Nothing`). Using
# `Maybe` is a good way to deal with errors or exceptional cases without
# resorting to drastic measures such as exceptions.
#
# `Maybe` is a replacement for the use of `nil` with better type checking.
#
# For compatibility with {Concurrent::Concern::Obligation} the predicate and
# accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and
# `reason`.
#
# ## Motivation
#
# A common pattern in languages with pattern matching, such as Erlang and
# Haskell, is to return *either* a value *or* an error from a function.
# Consider this Erlang code:
#
# ```erlang
# case file:consult("data.dat") of
# {ok, Terms} -> do_something_useful(Terms);
# {error, Reason} -> lager:error(Reason)
# end.
# ```
#
# In this example the standard library function `file:consult` returns a
# [tuple](http://erlang.org/doc/reference_manual/data_types.html#id69044)
# with two elements: an [atom](http://erlang.org/doc/reference_manual/data_types.html#id64134)
# (similar to a ruby symbol) and a variable containing ancillary data. On
# success it returns the atom `ok` and the data from the file. On failure it
# returns `error` and a string with an explanation of the problem. With this
# pattern there is no ambiguity regarding success or failure. If the file is
# empty the return value cannot be misinterpreted as an error. And when an
# error occurs the return value provides useful information.
#
# In Ruby we tend to return `nil` when an error occurs or else we raise an
# exception. Both of these idioms are problematic. Returning `nil` is
# ambiguous because `nil` may also be a valid value. It also lacks
# information pertaining to the nature of the error. Raising an exception
# is both expensive and usurps the normal flow of control. All of these
# problems can be solved with the use of a `Maybe`.
#
# A `Maybe` is unambiguous with regard to whether or not it contains a value.
# When `Just` it contains a value, when `Nothing` it does not. When `Just`
# the value it contains may be `nil`, which is perfectly valid. When
# `Nothing` the reason for the lack of a value is contained as well. The
# previous Erlang example can be duplicated in Ruby in a principled way by
# having functions return `Maybe` objects:
#
# ```ruby
# result = MyFileUtils.consult("data.dat") # returns a Maybe
# if result.just?
# do_something_useful(result.value) # or result.just
# else
# logger.error(result.reason) # or result.nothing
# end
# ```
#
# @example Returning a Maybe from a Function
# module MyFileUtils
# def self.consult(path)
# file = File.open(path, 'r')
# Concurrent::Maybe.just(file.read)
# rescue => ex
# return Concurrent::Maybe.nothing(ex)
# ensure
# file.close if file
# end
# end
#
# maybe = MyFileUtils.consult('bogus.file')
# maybe.just? #=> false
# maybe.nothing? #=> true
# maybe.reason #=> #<Errno::ENOENT: No such file or directory @ rb_sysopen - bogus.file>
#
# maybe = MyFileUtils.consult('README.md')
# maybe.just? #=> true
# maybe.nothing? #=> false
# maybe.value #=> "# Concurrent Ruby\n[![Gem Version..."
#
# @example Using Maybe with a Block
# result = Concurrent::Maybe.from do
# Client.find(10) # Client is an ActiveRecord model
# end
#
# # -- if the record was found
# result.just? #=> true
# result.value #=> #<Client id: 10, first_name: "Ryan">
#
# # -- if the record was not found
# result.just? #=> false
# result.reason #=> ActiveRecord::RecordNotFound
#
# @example Using Maybe with the Null Object Pattern
# # In a Rails controller...
# result = ClientService.new(10).find # returns a Maybe
# render json: result.or(NullClient.new)
#
# @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe
# @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe
class Maybe < Synchronization::Object
include Comparable
safe_initialization!
# Indicates that the given attribute has not been set.
# When `Just` the {#nothing} getter will return `NONE`.
# When `Nothing` the {#just} getter will return `NONE`.
NONE = ::Object.new.freeze
# The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`.
attr_reader :just
# The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`.
attr_reader :nothing
private_class_method :new
# Create a new `Maybe` using the given block.
#
# Runs the given block passing all function arguments to the block as block
# arguments. If the block runs to completion without raising an exception
# a new `Just` is created with the value set to the return value of the
# block. If the block raises an exception a new `Nothing` is created with
# the reason being set to the raised exception.
#
# @param [Array<Object>] args Zero or more arguments to pass to the block.
# @yield The block from which to create a new `Maybe`.
# @yieldparam [Array<Object>] args Zero or more block arguments passed as
# arguments to the function.
#
# @return [Maybe] The newly created object.
#
# @raise [ArgumentError] when no block given.
def self.from(*args)
raise ArgumentError.new('no block given') unless block_given?
begin
value = yield(*args)
return new(value, NONE)
rescue => ex
return new(NONE, ex)
end
end
# Create a new `Just` with the given value.
#
# @param [Object] value The value to set for the new `Maybe` object.
#
# @return [Maybe] The newly created object.
def self.just(value)
return new(value, NONE)
end
# Create a new `Nothing` with the given (optional) reason.
#
# @param [Exception] error The reason to set for the new `Maybe` object.
# When given a string a new `StandardError` will be created with the
# argument as the message. When no argument is given a new
# `StandardError` with an empty message will be created.
#
# @return [Maybe] The newly created object.
def self.nothing(error = '')
if error.is_a?(Exception)
nothing = error
else
nothing = StandardError.new(error.to_s)
end
return new(NONE, nothing)
end
# Is this `Maybe` a `Just` (successfully fulfilled with a value)?
#
# @return [Boolean] True if `Just` or false if `Nothing`.
def just?
! nothing?
end
alias :fulfilled? :just?
# Is this `Maybe` a `Nothing` (rejected with an exception)?
#
# @return [Boolean] True if `Nothing` or false if `Just`.
def nothing?
@nothing != NONE
end
alias :rejected? :nothing?
alias :value :just
alias :reason :nothing
# Comparison operator.
#
# @return [Integer] 0 if self and other are both `Nothing`;
# -1 if self is `Nothing` and other is `Just`;
# 1 if self is `Just` and other is `Nothing`;
# `self.just <=> other.just` if both self and other are `Just`.
def <=>(other)
if nothing?
other.nothing? ? 0 : -1
else
other.nothing? ? 1 : just <=> other.just
end
end
# Return either the value of self or the given default value.
#
# @return [Object] The value of self when `Just`; else the given default.
def or(other)
just? ? just : other
end
private
# Create a new `Maybe` with the given attributes.
#
# @param [Object] just The value when `Just` else `NONE`.
# @param [Exception, Object] nothing The exception when `Nothing` else `NONE`.
#
# @return [Maybe] The new `Maybe`.
#
# @!visibility private
def initialize(just, nothing)
@just = just
@nothing = nothing
end
end
end

View File

@ -0,0 +1,229 @@
require 'concurrent/synchronization/abstract_struct'
require 'concurrent/synchronization'
module Concurrent
# A thread-safe variation of Ruby's standard `Struct`. Values can be set at
# construction or safely changed at any time during the object's lifecycle.
#
# @see http://ruby-doc.org/core-2.2.0/Struct.html Ruby standard library `Struct`
module MutableStruct
include Synchronization::AbstractStruct
# @!macro struct_new
#
# Factory for creating new struct classes.
#
# ```
# new([class_name] [, member_name]+) -> StructClass
# new([class_name] [, member_name]+) {|StructClass| block } -> StructClass
# new(value, ...) -> obj
# StructClass[value, ...] -> obj
# ```
#
# The first two forms are used to create a new struct subclass `class_name`
# that can contain a value for each member_name. This subclass can be
# used to create instances of the structure like any other Class.
#
# If the `class_name` is omitted an anonymous struct class will be created.
# Otherwise, the name of this struct will appear as a constant in the struct class,
# so it must be unique for all structs under this base class and must start with a
# capital letter. Assigning a struct class to a constant also gives the class
# the name of the constant.
#
# If a block is given it will be evaluated in the context of `StructClass`, passing
# the created class as a parameter. This is the recommended way to customize a struct.
# Subclassing an anonymous struct creates an extra anonymous class that will never be used.
#
# The last two forms create a new instance of a struct subclass. The number of value
# parameters must be less than or equal to the number of attributes defined for the
# struct. Unset parameters default to nil. Passing more parameters than the number of attributes
# will raise an `ArgumentError`.
#
# @see http://ruby-doc.org/core-2.2.0/Struct.html#method-c-new Ruby standard library `Struct#new`
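#
# A short sketch (the `Point` name is illustrative):
#
# ```
# Point = Concurrent::MutableStruct.new(:x, :y)
# point = Point.new(0, 0)
# point.x = 10 # thread-safe assignment
# point.to_h   #=> {:x=>10, :y=>0}
# ```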
# @!macro struct_values
#
# Returns the values for this struct as an Array.
#
# @return [Array] the values for this struct
#
def values
synchronize { ns_values }
end
alias_method :to_a, :values
# @!macro struct_values_at
#
# Returns the struct member values for each selector as an Array.
#
# A selector may be either an Integer offset or a Range of offsets (as in `Array#values_at`).
#
# @param [Fixnum, Range] indexes the index(es) from which to obtain the values (in order)
def values_at(*indexes)
synchronize { ns_values_at(indexes) }
end
# @!macro struct_inspect
#
# Describe the contents of this struct in a string.
#
# @return [String] the contents of this struct in a string
def inspect
synchronize { ns_inspect }
end
alias_method :to_s, :inspect
# @!macro struct_merge
#
# Returns a new struct containing the contents of `other` and the contents
# of `self`. If no block is specified, the value for entries with duplicate
# keys will be that of `other`. Otherwise the value for each duplicate key
# is determined by calling the block with the key, its value in `self` and
# its value in `other`.
#
# @param [Hash] other the hash from which to set the new values
# @yield an options block for resolving duplicate keys
# @yieldparam [String, Symbol] member the name of the member which is duplicated
# @yieldparam [Object] selfvalue the value of the member in `self`
# @yieldparam [Object] othervalue the value of the member in `other`
#
# @return [Synchronization::AbstractStruct] a new struct with the new values
#
# @raise [ArgumentError] if given a member that is not defined in the struct
def merge(other, &block)
synchronize { ns_merge(other, &block) }
end
# @!macro struct_to_h
#
# Returns a hash containing the names and values for the struct's members.
#
# @return [Hash] the names and values for the struct's members
def to_h
synchronize { ns_to_h }
end
# @!macro struct_get
#
# Attribute Reference
#
# @param [Symbol, String, Integer] member the string or symbol name of the member
# for which to obtain the value or the member's index
#
# @return [Object] the value of the given struct member or the member at the given index.
#
# @raise [NameError] if the member does not exist
# @raise [IndexError] if the index is out of range.
def [](member)
synchronize { ns_get(member) }
end
# @!macro struct_equality
#
# Equality
#
# @return [Boolean] true if other has the same struct subclass and has
# equal member values (according to `Object#==`)
def ==(other)
synchronize { ns_equality(other) }
end
# @!macro struct_each
#
# Yields the value of each struct member in order. If no block is given
# an enumerator is returned.
#
# @yield the operation to be performed on each struct member
# @yieldparam [Object] value each struct value (in order)
def each(&block)
return enum_for(:each) unless block_given?
synchronize { ns_each(&block) }
end
# @!macro struct_each_pair
#
# Yields the name and value of each struct member in order. If no block is
# given an enumerator is returned.
#
# @yield the operation to be performed on each struct member/value pair
# @yieldparam [Object] member each struct member (in order)
# @yieldparam [Object] value each struct value (in order)
def each_pair(&block)
return enum_for(:each_pair) unless block_given?
synchronize { ns_each_pair(&block) }
end
# @!macro struct_select
#
# Yields each member value from the struct to the block and returns an Array
# containing the member values from the struct for which the given block
# returns a true value (equivalent to `Enumerable#select`).
#
# @yield the operation to be performed on each struct member
# @yieldparam [Object] value each struct value (in order)
#
# @return [Array] an array containing each value for which the block returns true
def select(&block)
return enum_for(:select) unless block_given?
synchronize { ns_select(&block) }
end
# @!macro struct_set
#
# Attribute Assignment
#
# Sets the value of the given struct member or the member at the given index.
#
# @param [Symbol, String, Integer] member the string or symbol name of the member
# for which to obtain the value or the member's index
#
# @return [Object] the value of the given struct member or the member at the given index.
#
# @raise [NameError] if the name does not exist
# @raise [IndexError] if the index is out of range.
def []=(member, value)
if member.is_a? Integer
length = synchronize { @values.length }
if member >= length
raise IndexError.new("offset #{member} too large for struct(size:#{length})")
end
synchronize { @values[member] = value }
else
send("#{member}=", value)
end
rescue NoMethodError
raise NameError.new("no member '#{member}' in struct")
end
# @!macro struct_new
def self.new(*args, &block)
clazz_name = nil
if args.length == 0
raise ArgumentError.new('wrong number of arguments (0 for 1+)')
elsif args.length > 0 && args.first.is_a?(String)
clazz_name = args.shift
end
FACTORY.define_struct(clazz_name, args, &block)
end
FACTORY = Class.new(Synchronization::LockableObject) do
def define_struct(name, members, &block)
synchronize do
clazz = Synchronization::AbstractStruct.define_struct_class(MutableStruct, Synchronization::LockableObject, name, members, &block)
members.each_with_index do |member, index|
clazz.send :remove_method, member
clazz.send(:define_method, member) do
synchronize { @values[index] }
end
clazz.send(:define_method, "#{member}=") do |value|
synchronize { @values[index] = value }
end
end
clazz
end
end
end.new
private_constant :FACTORY
end
end

View File

@ -0,0 +1,242 @@
require 'concurrent/concern/dereferenceable'
require 'concurrent/synchronization'
module Concurrent
# An `MVar` is a synchronized single-element container: it is either empty or
# contains one item. Taking a value from an empty `MVar` blocks, as does
# putting a value into a full one. You can think of it either as a blocking
# queue of length one, or as a special kind of mutable variable.
#
# On top of the fundamental `#put` and `#take` operations, we also provide a
# `#modify` that is atomic with respect to operations on the same instance.
# These operations all support timeouts.
#
# We also support non-blocking operations `#try_put!` and `#try_take!`, a
# `#set!` that ignores existing values, a `#value` that returns the value
# without removing it or returns `MVar::EMPTY`, and a `#modify!` that yields
# `MVar::EMPTY` if the `MVar` is empty and can be used to set `MVar::EMPTY`.
# These operations are lower-level and shouldn't be your first choice.
#
# `MVar` is a [Dereferenceable](Dereferenceable).
#
# `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala.
#
# Note that unlike the original Haskell paper, our `#take` is blocking. This is how
# Haskell and Scala do it today.
#
# @!macro copy_options
#
# ## See Also
#
# 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non- strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th
# ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991.
#
# 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794).
# In Proceedings of the 23rd Symposium on Principles of Programming Languages
# (PoPL), 1996.
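#
# A minimal sketch of a blocking hand-off between threads (illustrative):
#
# ```ruby
# mvar = Concurrent::MVar.new
# Thread.new { mvar.put 42 } # would block if the MVar were already full
# mvar.take #=> 42, blocks until a value is present
# ```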
class MVar < Synchronization::Object
include Concern::Dereferenceable
safe_initialization!
# Unique value that represents that an `MVar` was empty
EMPTY = ::Object.new
# Unique value that represents that an `MVar` timed out before it was able
# to produce a value.
TIMEOUT = ::Object.new
# Create a new `MVar`, either empty or with an initial value.
#
# @param [Hash] opts the options controlling how the future will be processed
#
# @!macro deref_options
def initialize(value = EMPTY, opts = {})
@value = value
@mutex = Mutex.new
@empty_condition = ConditionVariable.new
@full_condition = ConditionVariable.new
set_deref_options(opts)
end
# Remove the value from an `MVar`, leaving it empty, and blocking if there
# isn't a value. A timeout can be set to limit the time spent blocked, in
# which case it returns `TIMEOUT` if the time is exceeded.
# @return [Object] the value that was taken, or `TIMEOUT`
def take(timeout = nil)
@mutex.synchronize do
wait_for_full(timeout)
# If we timed out we'll still be empty
if unlocked_full?
value = @value
@value = EMPTY
@empty_condition.signal
apply_deref_options(value)
else
TIMEOUT
end
end
end
# Acquires the lock on the `MVar`, yields its value to the provided block,
# and releases the lock. A timeout can be set to limit the time spent blocked,
# in which case it returns `TIMEOUT` if the time is exceeded.
# @return [Object] the value returned by the block, or `TIMEOUT`
def borrow(timeout = nil)
@mutex.synchronize do
wait_for_full(timeout)
# If we timed out we'll still be empty
if unlocked_full?
yield @value
else
TIMEOUT
end
end
end
# Put a value into an `MVar`, blocking if there is already a value until
# it is empty. A timeout can be set to limit the time spent blocked, in
# which case it returns `TIMEOUT` if the time is exceeded.
# @return [Object] the value that was put, or `TIMEOUT`
def put(value, timeout = nil)
@mutex.synchronize do
wait_for_empty(timeout)
# If we timed out we won't be empty
if unlocked_empty?
@value = value
@full_condition.signal
apply_deref_options(value)
else
TIMEOUT
end
end
end
# Atomically `take`, yield the value to a block for transformation, and then
# `put` the transformed value. Returns the original, pre-transformation
# value. A timeout can be set to limit the time spent blocked, in which case
# it returns `TIMEOUT` if the time is exceeded.
# @return [Object] the original value, or `TIMEOUT`
def modify(timeout = nil)
raise ArgumentError.new('no block given') unless block_given?
@mutex.synchronize do
wait_for_full(timeout)
# If we timed out we'll still be empty
if unlocked_full?
value = @value
@value = yield value
@full_condition.signal
apply_deref_options(value)
else
TIMEOUT
end
end
end
# Non-blocking version of `take`, that returns `EMPTY` instead of blocking.
def try_take!
@mutex.synchronize do
if unlocked_full?
value = @value
@value = EMPTY
@empty_condition.signal
apply_deref_options(value)
else
EMPTY
end
end
end
# Non-blocking version of `put`, that returns whether or not it was successful.
def try_put!(value)
@mutex.synchronize do
if unlocked_empty?
@value = value
@full_condition.signal
true
else
false
end
end
end
# Non-blocking version of `put` that will overwrite an existing value.
def set!(value)
@mutex.synchronize do
old_value = @value
@value = value
@full_condition.signal
apply_deref_options(old_value)
end
end
# Non-blocking version of `modify` that will yield with `EMPTY` if there is no value yet.
def modify!
raise ArgumentError.new('no block given') unless block_given?
@mutex.synchronize do
value = @value
@value = yield value
if unlocked_empty?
@empty_condition.signal
else
@full_condition.signal
end
apply_deref_options(value)
end
end
# Returns whether the `MVar` is currently empty.
def empty?
@mutex.synchronize { @value == EMPTY }
end
# Returns whether the `MVar` currently contains a value.
def full?
!empty?
end
protected
def synchronize(&block)
@mutex.synchronize(&block)
end
private
def unlocked_empty?
@value == EMPTY
end
def unlocked_full?
! unlocked_empty?
end
def wait_for_full(timeout)
wait_while(@full_condition, timeout) { unlocked_empty? }
end
def wait_for_empty(timeout)
wait_while(@empty_condition, timeout) { unlocked_full? }
end
def wait_while(condition, timeout)
if timeout.nil?
while yield
condition.wait(@mutex)
end
else
stop = Concurrent.monotonic_time + timeout
while yield && timeout > 0.0
condition.wait(@mutex, timeout)
timeout = stop - Concurrent.monotonic_time
end
end
end
end
end

View File

@ -0,0 +1,42 @@
require 'concurrent/configuration'
module Concurrent
# @!visibility private
module Options
# Get the requested `Executor` based on the values set in the options hash.
#
# @param [Hash] opts the options defining the requested executor
# @option opts [Executor] :executor when set use the given `Executor` instance.
# Three special values are also supported: `:fast` returns the global fast executor,
# `:io` returns the global io executor, and `:immediate` returns the global
# immediate executor.
#
# @return [Executor, nil] the requested thread pool, or nil when no option specified
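# @example A sketch of the supported values (illustrative)
#   Concurrent::Options.executor_from_options(executor: :io)
#   #=> the global io executor
#   Concurrent::Options.executor_from_options({}) #=> nil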
#
# @!visibility private
def self.executor_from_options(opts = {}) # :nodoc:
if identifier = opts.fetch(:executor, nil)
executor(identifier)
else
nil
end
end
def self.executor(executor_identifier)
case executor_identifier
when :fast
Concurrent.global_fast_executor
when :io
Concurrent.global_io_executor
when :immediate
Concurrent.global_immediate_executor
when Concurrent::ExecutorService
executor_identifier
else
raise ArgumentError, "executor not recognized by '#{executor_identifier}'"
end
end
end
end

View File

@ -0,0 +1,579 @@
require 'thread'
require 'concurrent/constants'
require 'concurrent/errors'
require 'concurrent/ivar'
require 'concurrent/executor/safe_task_executor'
require 'concurrent/options'
module Concurrent
PromiseExecutionError = Class.new(StandardError)
# Promises are inspired by the JavaScript [Promises/A](http://wiki.commonjs.org/wiki/Promises/A)
# and [Promises/A+](http://promises-aplus.github.io/promises-spec/) specifications.
#
# > A promise represents the eventual value returned from the single
# > completion of an operation.
#
# Promises are similar to futures and share many of the same behaviours.
# Promises are far more robust, however. Promises can be chained in a tree
# structure where each promise may have zero or more children. Promises are
# chained using the `then` method. The result of a call to `then` is always
# another promise. Promises are resolved asynchronously (with respect to the
# main thread) but in a strict order: parents are guaranteed to be resolved
# before their children, children before their younger siblings. The `then`
# method takes two parameters: an optional block to be executed upon parent
# resolution and an optional callable to be executed upon parent failure. The
# result of each promise is passed to each of its children upon resolution.
# When a promise is rejected all its children will be summarily rejected and
# will receive the reason.
#
# Promises have several possible states: *:unscheduled*, *:pending*,
# *:processing*, *:rejected*, or *:fulfilled*. These are also aggregated as
# `#incomplete?` and `#complete?`. When a Promise is created it is set to
# *:unscheduled*. Once the `#execute` method is called the state becomes
# *:pending*. Once a job is pulled from the thread pool's queue and is given
# to a thread for processing (often immediately upon `#post`) the state
# becomes *:processing*. The promise will remain in this state until processing
# is complete. A promise that is in the *:unscheduled*, *:pending*, or
# *:processing* state is considered `#incomplete?`. A `#complete?` Promise is either
# *:rejected*, indicating that an exception was thrown during processing, or
# *:fulfilled*, indicating success. If a Promise is *:fulfilled* its `#value`
# will be updated to reflect the result of the operation. If *:rejected* the
# `reason` will be updated with a reference to the thrown exception. The
# predicate methods `#unscheduled?`, `#pending?`, `#rejected?`, and
# `#fulfilled?` can be called at any time to obtain the state of the Promise,
# as can the `#state` method, which returns a symbol.
#
# Retrieving the value of a promise is done through the `value` (alias:
# `deref`) method. Obtaining the value of a promise is a potentially blocking
# operation. When a promise is *rejected* a call to `value` will return `nil`
# immediately. When a promise is *fulfilled* a call to `value` will
# immediately return the current value. When a promise is *pending* a call to
# `value` will block until the promise is either *rejected* or *fulfilled*. A
# *timeout* value can be passed to `value` to limit how long the call will
# block. If `nil` the call will block indefinitely. If `0` the call will not
# block. Any other integer or float value will indicate the maximum number of
# seconds to block.
#
# Promises run on the global thread pool.
#
# @!macro copy_options
#
# ### Examples
#
# Start by requiring promises
#
# ```ruby
# require 'concurrent'
# ```
#
# Then create one
#
# ```ruby
# p = Concurrent::Promise.execute do
# # do something
# 42
# end
# ```
#
# Promises can be chained using the `then` method. The `then` method accepts a
# block and an executor, to be executed on fulfillment, and a callable argument to be executed
# on rejection. The result of each promise is passed as the block argument
# to chained promises.
#
# ```ruby
# p = Concurrent::Promise.new{10}.then{|x| x * 2}.then{|result| result - 10 }.execute
# ```
#
# And so on, and so on, and so on...
#
# ```ruby
# p = Concurrent::Promise.fulfill(20).
# then{|result| result - 10 }.
# then{|result| result * 3 }.
# then(executor: different_executor){|result| result % 5 }.execute
# ```
#
# The initial state of a newly created Promise depends on the state of its parent:
# - if parent is *unscheduled* the child will be *unscheduled*
# - if parent is *pending* the child will be *pending*
# - if parent is *fulfilled* the child will be *pending*
# - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*)
#
# Promises are executed asynchronously from the main thread. By the time a
# child Promise finishes initialization it may be in a different state than its
# parent (by the time a child is created its parent may have completed
# execution and changed state). Despite being asynchronous, however, the order
# of execution of Promise objects in a chain (or tree) is strictly defined.
#
# There are multiple ways to create and execute a new `Promise`. Both ways
# provide identical behavior:
#
# ```ruby
# # create, operate, then execute
# p1 = Concurrent::Promise.new{ "Hello World!" }
# p1.state #=> :unscheduled
# p1.execute
#
# # create and immediately execute
# p2 = Concurrent::Promise.new{ "Hello World!" }.execute
#
# # execute during creation
# p3 = Concurrent::Promise.execute{ "Hello World!" }
# ```
#
# Once the `execute` method is called a `Promise` becomes `pending`:
#
# ```ruby
# p = Concurrent::Promise.execute{ "Hello, world!" }
# p.state #=> :pending
# p.pending? #=> true
# ```
#
# Wait a little bit, and the promise will resolve and provide a value:
#
# ```ruby
# p = Concurrent::Promise.execute{ "Hello, world!" }
# sleep(0.1)
#
# p.state #=> :fulfilled
# p.fulfilled? #=> true
# p.value #=> "Hello, world!"
# ```
#
# If an exception occurs, the promise will be rejected and will provide
# a reason for the rejection:
#
# ```ruby
# p = Concurrent::Promise.execute{ raise StandardError.new("Here comes the Boom!") }
# sleep(0.1)
#
# p.state #=> :rejected
# p.rejected? #=> true
# p.reason #=> "#<StandardError: Here comes the Boom!>"
# ```
#
# #### Rejection
#
# When a promise is rejected all its children will be rejected and will
# receive the rejection `reason` as the rejection callable parameter:
#
# ```ruby
# p = Concurrent::Promise.execute { Thread.pass; raise StandardError }
#
# c1 = p.then(-> reason { 42 })
# c2 = p.then(-> reason { raise 'Boom!' })
#
# c1.wait.state #=> :fulfilled
# c1.value #=> 42
# c2.wait.state #=> :rejected
# c2.reason #=> #<RuntimeError: Boom!>
# ```
#
# Once a promise is rejected it will continue to accept children, which will
# receive immediate rejection (they will be executed asynchronously).
#
# #### Aliases
#
# The `then` method is the most generic alias: it accepts a block to be
# executed upon parent fulfillment and a callable to be executed upon parent
# rejection. At least one of them should be passed. The default block is `{
# |result| result }` that fulfills the child with the parent value. The
# default callable is `{ |reason| raise reason }` that rejects the child with
# the parent reason.
#
# - `on_success { |result| ... }` is the same as `then {|result| ... }`
# - `rescue { |reason| ... }` is the same as `then(Proc.new { |reason| ... } )`
# - `rescue` is aliased by `catch` and `on_error`
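#
# For example, a rejection handler can recover a value from the parent's
# reason (a minimal sketch):
#
# ```ruby
# p = Concurrent::Promise.execute { raise StandardError, 'Boom!' }
# p.rescue { |reason| reason.message }.value #=> "Boom!"
# ```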
class Promise < IVar
# Initialize a new Promise with the provided options.
#
# @!macro executor_and_deref_options
#
# @!macro promise_init_options
#
# @option opts [Promise] :parent the parent `Promise` when building a chain/tree
# @option opts [Proc] :on_fulfill fulfillment handler
# @option opts [Proc] :on_reject rejection handler
# @option opts [object, Array] :args zero or more arguments to be passed
# the task block on execution
#
# @yield The block operation to be performed asynchronously.
#
# @raise [ArgumentError] if no block is given
#
# @see http://wiki.commonjs.org/wiki/Promises/A
# @see http://promises-aplus.github.io/promises-spec/
def initialize(opts = {}, &block)
opts.delete_if { |k, v| v.nil? }
super(NULL, opts.merge(__promise_body_from_block__: block), &nil)
end
# Create a new `Promise` and fulfill it immediately.
#
# @!macro executor_and_deref_options
#
# @!macro promise_init_options
#
# @raise [ArgumentError] if no block is given
#
# @return [Promise] the newly created `Promise`
def self.fulfill(value, opts = {})
Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, true, value, nil) }
end
# Create a new `Promise` and reject it immediately.
#
# @!macro executor_and_deref_options
#
# @!macro promise_init_options
#
# @raise [ArgumentError] if no block is given
#
# @return [Promise] the newly created `Promise`
def self.reject(reason, opts = {})
Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, false, nil, reason) }
end
# Execute an `:unscheduled` `Promise`. Immediately sets the state to `:pending` and
# passes the block to a new thread/thread pool for eventual execution.
# Does nothing if the `Promise` is in any state other than `:unscheduled`.
#
# @return [Promise] a reference to `self`
def execute
if root?
if compare_and_set_state(:pending, :unscheduled)
set_pending
realize(@promise_body)
end
else
@parent.execute
end
self
end
# @!macro ivar_set_method
#
# @raise [Concurrent::PromiseExecutionError] if not the root promise
def set(value = NULL, &block)
raise PromiseExecutionError.new('supported only on root promise') unless root?
check_for_block_or_value!(block_given?, value)
synchronize do
if @state != :unscheduled
raise MultipleAssignmentError
else
@promise_body = block || Proc.new { |result| value }
end
end
execute
end
# @!macro ivar_fail_method
#
# @raise [Concurrent::PromiseExecutionError] if not the root promise
def fail(reason = StandardError.new)
set { raise reason }
end
# Create a new `Promise` object with the given block, execute it, and return the
# `:pending` object.
#
# @!macro executor_and_deref_options
#
# @!macro promise_init_options
#
# @return [Promise] the newly created `Promise` in the `:pending` state
#
# @raise [ArgumentError] if no block is given
#
# @example
# promise = Concurrent::Promise.execute{ sleep(1); 42 }
# promise.state #=> :pending
def self.execute(opts = {}, &block)
new(opts, &block).execute
end
# Chain a new promise off the current promise.
#
# @return [Promise] the new promise
# @yield The block operation to be performed asynchronously.
# @overload then(rescuer, executor, &block)
# @param [Proc] rescuer An optional rescue block to be executed if the
# promise is rejected.
# @param [ThreadPool] executor An optional thread pool executor to be used
# in the new Promise
# @overload then(rescuer, executor: executor, &block)
# @param [Proc] rescuer An optional rescue block to be executed if the
# promise is rejected.
# @param [ThreadPool] executor An optional thread pool executor to be used
# in the new Promise
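#
# @example A minimal sketch
#   p = Concurrent::Promise.execute { 10 }.then { |v| v * 2 }
#   p.value #=> 20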
def then(*args, &block)
if args.last.is_a?(::Hash)
executor = args.pop[:executor]
rescuer = args.first
else
rescuer, executor = args
end
executor ||= @executor
raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given?
block = Proc.new { |result| result } unless block_given?
child = Promise.new(
parent: self,
executor: executor,
on_fulfill: block,
on_reject: rescuer
)
synchronize do
child.state = :pending if @state == :pending
child.on_fulfill(apply_deref_options(@value)) if @state == :fulfilled
child.on_reject(@reason) if @state == :rejected
@children << child
end
child
end
# Chain onto this promise an action to be undertaken on success
# (fulfillment).
#
# @yield The block to execute
#
# @return [Promise] self
def on_success(&block)
raise ArgumentError.new('no block given') unless block_given?
self.then(&block)
end
# Chain onto this promise an action to be undertaken on failure
# (rejection).
#
# @yield The block to execute
#
# @return [Promise] self
def rescue(&block)
self.then(block)
end
alias_method :catch, :rescue
alias_method :on_error, :rescue
# Yield the successful result to the block that returns a promise. If that
# promise is also successful the result is the result of the yielded promise.
# If either part fails the whole also fails.
#
# @example
# Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! #=> 3
#
# @return [Promise]
def flat_map(&block)
child = Promise.new(
parent: self,
executor: ImmediateExecutor.new,
)
on_error { |e| child.on_reject(e) }
on_success do |result1|
begin
inner = block.call(result1)
inner.execute
inner.on_success { |result2| child.on_fulfill(result2) }
inner.on_error { |e| child.on_reject(e) }
rescue => e
child.on_reject(e)
end
end
child
end
# Builds a promise that produces the result of promises in an Array
# and fails if any of them fails.
#
# @overload zip(*promises)
# @param [Array<Promise>] promises
#
# @overload zip(*promises, opts)
# @param [Array<Promise>] promises
# @param [Hash] opts the configuration options
# @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance.
# @option opts [Boolean] :execute (true) execute promise before returning
#
# @return [Promise<Array>]
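#
# @example A minimal sketch
#   a = Concurrent::Promise.execute { 1 }
#   b = Concurrent::Promise.execute { 2 }
#   Concurrent::Promise.zip(a, b).value #=> [1, 2]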
def self.zip(*promises)
opts = promises.last.is_a?(::Hash) ? promises.pop.dup : {}
opts[:executor] ||= ImmediateExecutor.new
zero = if !opts.key?(:execute) || opts.delete(:execute)
fulfill([], opts)
else
Promise.new(opts) { [] }
end
promises.reduce(zero) do |p1, p2|
p1.flat_map do |results|
p2.then do |next_result|
results << next_result
end
end
end
end
# Builds a promise that produces the result of self and others in an Array
# and fails if any of them fails.
#
# @overload zip(*promises)
# @param [Array<Promise>] others
#
# @overload zip(*promises, opts)
# @param [Array<Promise>] others
# @param [Hash] opts the configuration options
# @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance.
# @option opts [Boolean] :execute (true) execute promise before returning
#
# @return [Promise<Array>]
def zip(*others)
self.class.zip(self, *others)
end
# Aggregates a collection of promises and executes the `then` condition
# if all aggregated promises succeed. Executes the `rescue` handler with
# a `Concurrent::PromiseExecutionError` if any of the aggregated promises
# fail. Upon execution will execute any of the aggregate promises that
# were not already executed.
#
# @!macro promise_self_aggregate
#
# The returned promise will not yet have been executed. Additional `#then`
# and `#rescue` handlers may still be provided. Once the returned promise
# is executed the aggregate promises will also be executed (if they have
# not been executed already). The results of the aggregate promises will
# be checked upon completion. The necessary `#then` and `#rescue` blocks
# on the aggregating promise will then be executed as appropriate. If the
# `#rescue` handlers are executed the raised exception will be
# `Concurrent::PromiseExecutionError`.
#
# @param [Array] promises Zero or more promises to aggregate
# @return [Promise] an unscheduled (not executed) promise that aggregates
# the promises given as arguments
def self.all?(*promises)
aggregate(:all?, *promises)
end
# Aggregates a collection of promises and executes the `then` condition
# if any aggregated promises succeed. Executes the `rescue` handler with
# a `Concurrent::PromiseExecutionError` if any of the aggregated promises
# fail. Upon execution will execute any of the aggregate promises that
# were not already executed.
#
# @!macro promise_self_aggregate
def self.any?(*promises)
aggregate(:any?, *promises)
end
protected
def ns_initialize(value, opts)
super
@executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
@args = get_arguments_from(opts)
@parent = opts.fetch(:parent) { nil }
@on_fulfill = opts.fetch(:on_fulfill) { Proc.new { |result| result } }
@on_reject = opts.fetch(:on_reject) { Proc.new { |reason| raise reason } }
@promise_body = opts[:__promise_body_from_block__] || Proc.new { |result| result }
@state = :unscheduled
@children = []
end
# Aggregate a collection of zero or more promises under a composite promise,
# execute the aggregated promises and collect them into a standard Ruby array,
# call the given Ruby `Enumerable` predicate (such as `any?`, `all?`, `none?`,
# or `one?`) on the collection checking for the success or failure of each,
# then executing the composite's `#then` handlers if the predicate returns
# `true` or executing the composite's `#rescue` handlers if the predicate
# returns false.
#
# @!macro promise_self_aggregate
def self.aggregate(method, *promises)
composite = Promise.new do
completed = promises.collect do |promise|
promise.execute if promise.unscheduled?
promise.wait
promise
end
unless completed.empty? || completed.send(method){|promise| promise.fulfilled? }
raise PromiseExecutionError
end
end
composite
end
# @!visibility private
def set_pending
synchronize do
@state = :pending
@children.each { |c| c.set_pending }
end
end
# @!visibility private
def root? # :nodoc:
@parent.nil?
end
# @!visibility private
def on_fulfill(result)
realize Proc.new { @on_fulfill.call(result) }
nil
end
# @!visibility private
def on_reject(reason)
realize Proc.new { @on_reject.call(reason) }
nil
end
# @!visibility private
def notify_child(child)
if_state(:fulfilled) { child.on_fulfill(apply_deref_options(@value)) }
if_state(:rejected) { child.on_reject(@reason) }
end
# @!visibility private
def complete(success, value, reason)
children_to_notify = synchronize do
set_state!(success, value, reason)
@children.dup
end
children_to_notify.each { |child| notify_child(child) }
observers.notify_and_delete_observers{ [Time.now, self.value, reason] }
end
# @!visibility private
def realize(task)
@executor.post do
success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args)
complete(success, value, reason)
end
end
# @!visibility private
def set_state!(success, value, reason)
set_state(success, value, reason)
event.set
end
# @!visibility private
def synchronized_set_state!(success, value, reason)
synchronize { set_state!(success, value, reason) }
end
end
end

View File

@ -0,0 +1,58 @@
module Concurrent
# Methods from module A included into a module B, which is already included into class C,
# will not be visible in class C. If this module extends B then A's methods
# are correctly made visible to C.
#
# @example
# module A
# def a
# :a
# end
# end
#
# module B1
# end
#
# class C1
# include B1
# end
#
# module B2
# extend Concurrent::ReInclude
# end
#
# class C2
# include B2
# end
#
# B1.send :include, A
# B2.send :include, A
#
# C1.new.respond_to? :a # => false
# C2.new.respond_to? :a # => true
module ReInclude
# @!visibility private
def included(base)
(@re_include_to_bases ||= []) << [:include, base]
super(base)
end
# @!visibility private
def extended(base)
(@re_include_to_bases ||= []) << [:extend, base]
super(base)
end
# @!visibility private
def include(*modules)
result = super(*modules)
modules.reverse.each do |module_being_included|
(@re_include_to_bases ||= []).each do |method, mod|
mod.send method, module_being_included
end
end
result
end
end
end

View File

@ -0,0 +1,318 @@
require 'concurrent/constants'
require 'concurrent/errors'
require 'concurrent/configuration'
require 'concurrent/ivar'
require 'concurrent/collection/copy_on_notify_observer_set'
require 'concurrent/utility/monotonic_time'
require 'concurrent/options'
module Concurrent
# `ScheduledTask` is a close relative of `Concurrent::Future` but with one
# important difference: A `Future` is set to execute as soon as possible
# whereas a `ScheduledTask` is set to execute after a specified delay. This
# implementation is loosely based on Java's
# [ScheduledExecutorService](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html).
# It is a more feature-rich variant of {Concurrent.timer}.
#
# The *intended* schedule time of task execution is set on object construction
# with the `delay` argument. The delay is a numeric (floating point or integer)
# representing a number of seconds in the future. Any other value or a numeric
# less than zero will result in an exception. The *actual* schedule
# time of task execution is set when the `execute` method is called.
#
# The constructor can also be given zero or more processing options. Currently
# the only supported options are those recognized by the
# [Dereferenceable](Dereferenceable) module.
#
# The final constructor argument is a block representing the task to be performed.
# If no block is given an `ArgumentError` will be raised.
#
# **States**
#
# `ScheduledTask` mixes in the [Obligation](Obligation) module thus giving it
# "future" behavior. This includes the expected lifecycle states. `ScheduledTask`
# has one additional state, however. While the task (block) is being executed the
# state of the object will be `:processing`. This additional state is necessary
# because it has implications for task cancellation.
#
# **Cancellation**
#
# A `:pending` task can be cancelled using the `#cancel` method. A task in any
# other state, including `:processing`, cannot be cancelled. The `#cancel`
# method returns a boolean indicating the success of the cancellation attempt.
# A cancelled `ScheduledTask` cannot be restarted. It is immutable.
#
# **Obligation and Observation**
#
# The result of a `ScheduledTask` can be obtained either synchronously or
# asynchronously. `ScheduledTask` mixes in both the [Obligation](Obligation)
# module and the
# [Observable](http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html)
# module from the Ruby standard library. With one exception `ScheduledTask`
# behaves identically to [Future](Future) with regard to these modules.
#
# @!macro copy_options
#
# @example Basic usage
#
# require 'concurrent'
# require 'thread' # for Queue
# require 'open-uri' # for open(uri)
#
# class Ticker
# def get_year_end_closing(symbol, year)
# uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m"
# data = open(uri) {|f| f.collect{|line| line.strip } }
# data[1].split(',')[4].to_f
# end
# end
#
# # Future
# price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) }
# price.state #=> :pending
# sleep(1) # do other stuff
# price.value #=> 63.65
# price.state #=> :fulfilled
#
# # ScheduledTask
# task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) }
# task.state #=> :pending
# sleep(3) # do other stuff
# task.value #=> 25.96
#
# @example Successful task execution
#
# task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }
# task.state #=> :unscheduled
# task.execute
# task.state #=> :pending
#
# # wait for it...
# sleep(3)
#
# task.unscheduled? #=> false
# task.pending? #=> false
# task.fulfilled? #=> true
# task.rejected? #=> false
# task.value #=> 'What does the fox say?'
#
# @example One line creation and execution
#
# task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }.execute
# task.state #=> :pending
#
# task = Concurrent::ScheduledTask.execute(2){ 'What do you get when you multiply 6 by 9?' }
# task.state #=> :pending
#
# @example Failed task execution
#
# task = Concurrent::ScheduledTask.execute(2){ raise StandardError.new('Call me maybe?') }
# task.pending? #=> true
#
# # wait for it...
# sleep(3)
#
# task.unscheduled? #=> false
# task.pending? #=> false
# task.fulfilled? #=> false
# task.rejected? #=> true
# task.value #=> nil
# task.reason #=> #<StandardError: Call me maybe?>
#
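# @example Task cancellation (an editorial sketch based on the cancellation
#   notes above; the delay value is arbitrary)
#
#   task = Concurrent::ScheduledTask.execute(10){ 'never run' }
#   task.pending?   #=> true
#   task.cancel     #=> true
#   task.cancelled? #=> true
#   task.cancel     #=> false # only :unscheduled or :pending tasks can be cancelled
#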
# @example Task execution with observation
#
# observer = Class.new{
# def update(time, value, reason)
# puts "The task completed at #{time} with value '#{value}'"
# end
# }.new
#
# task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }
# task.add_observer(observer)
# task.execute
# task.pending? #=> true
#
# # wait for it...
# sleep(3)
#
# #>> The task completed at 2013-11-07 12:26:09 -0500 with value 'What does the fox say?'
#
# @!macro monotonic_clock_warning
#
# @see Concurrent.timer
class ScheduledTask < IVar
include Comparable
# The executor on which to execute the task.
# @!visibility private
attr_reader :executor
# Schedule a task for execution at a specified future time.
#
# @param [Float] delay the number of seconds to wait for before executing the task
#
# @yield the task to be performed
#
# @!macro executor_and_deref_options
#
# @option opts [object, Array] :args zero or more arguments to be passed to
#   the task block on execution
#
# @raise [ArgumentError] When no block is given
# @raise [ArgumentError] When given a time that is in the past
def initialize(delay, opts = {}, &task)
raise ArgumentError.new('no block given') unless block_given?
raise ArgumentError.new('seconds must be greater than or equal to zero') if delay.to_f < 0.0
super(NULL, opts, &nil)
synchronize do
ns_set_state(:unscheduled)
@parent = opts.fetch(:timer_set, Concurrent.global_timer_set)
@args = get_arguments_from(opts)
@delay = delay.to_f
@task = task
@time = nil
@executor = Options.executor_from_options(opts) || Concurrent.global_io_executor
self.observers = Collection::CopyOnNotifyObserverSet.new
end
end
# The `delay` value given at instantiation.
#
# @return [Float] the initial delay.
def initial_delay
synchronize { @delay }
end
# The monotonic time at which the task is scheduled to be executed.
#
# @return [Float] the schedule time or nil if `unscheduled`
def schedule_time
synchronize { @time }
end
# Comparator which orders by schedule time.
#
# @!visibility private
def <=>(other)
schedule_time <=> other.schedule_time
end
# Has the task been cancelled?
#
# @return [Boolean] true if the task is in the `:cancelled` state else false
def cancelled?
synchronize { ns_check_state?(:cancelled) }
end
# Is the task execution in progress?
#
# @return [Boolean] true if the task is in the `:processing` state else false
def processing?
synchronize { ns_check_state?(:processing) }
end
# Cancel this task and prevent it from executing. A task can only be
# cancelled if it is pending or unscheduled.
#
# @return [Boolean] true if successfully cancelled else false
def cancel
if compare_and_set_state(:cancelled, :pending, :unscheduled)
complete(false, nil, CancelledOperationError.new)
# To avoid deadlocks this call must occur outside of #synchronize
# Changing the state above should prevent redundant calls
@parent.send(:remove_task, self)
else
false
end
end
# Reschedule the task using the original delay and the current time.
# A task can only be reset while it is `:pending`.
#
# @return [Boolean] true if successfully rescheduled else false
def reset
synchronize{ ns_reschedule(@delay) }
end
# Reschedule the task using the given delay and the current time.
# A task can only be reset while it is `:pending`.
#
# @param [Float] delay the number of seconds to wait for before executing the task
#
# @return [Boolean] true if successfully rescheduled else false
#
# @raise [ArgumentError] When given a time that is in the past
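# @example Rescheduling relative to now (an editorial sketch; the delay
#   values are arbitrary)
#   task = Concurrent::ScheduledTask.execute(60){ :work }
#   task.reschedule(5) #=> true, the task is now due ~5 seconds from now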
def reschedule(delay)
delay = delay.to_f
raise ArgumentError.new('seconds must be greater than or equal to zero') if delay < 0.0
synchronize{ ns_reschedule(delay) }
end
# Execute an `:unscheduled` `ScheduledTask`. Immediately sets the state to `:pending`
# and starts counting down toward execution. Does nothing if the `ScheduledTask` is
# in any state other than `:unscheduled`.
#
# @return [ScheduledTask] a reference to `self`
def execute
if compare_and_set_state(:pending, :unscheduled)
synchronize{ ns_schedule(@delay) }
end
self
end
# Create a new `ScheduledTask` object with the given block, execute it, and return the
# `:pending` object.
#
# @param [Float] delay the number of seconds to wait for before executing the task
#
# @!macro executor_and_deref_options
#
# @return [ScheduledTask] the newly created `ScheduledTask` in the `:pending` state
#
# @raise [ArgumentError] if no block is given
def self.execute(delay, opts = {}, &task)
new(delay, opts, &task).execute
end
# Execute the task.
#
# @!visibility private
def process_task
safe_execute(@task, @args)
end
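# Hide the mutators inherited from IVar; the apparent intent (an editorial
# inference, not upstream commentary) is that only the scheduled execution
# itself may complete the task.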
protected :set, :try_set, :fail, :complete
protected
# Schedule the task using the given delay and the current time.
#
# @param [Float] delay the number of seconds to wait for before executing the task
#
# @return [Boolean] true if successfully rescheduled else false
#
# @!visibility private
def ns_schedule(delay)
@delay = delay
@time = Concurrent.monotonic_time + @delay
@parent.send(:post_task, self)
end
# Reschedule the task using the given delay and the current time.
# A task can only be reset while it is `:pending`.
#
# @param [Float] delay the number of seconds to wait for before executing the task
#
# @return [Boolean] true if successfully rescheduled else false
#
# @!visibility private
def ns_reschedule(delay)
return false unless ns_check_state?(:pending)
@parent.send(:remove_task, self) && ns_schedule(delay)
end
end
end

View File

@ -0,0 +1,66 @@
require 'concurrent/utility/engine'
require 'concurrent/thread_safe/util'
require 'set'
module Concurrent
# @!macro concurrent_set
#
# A thread-safe subclass of Set. This version locks against the object
# itself for every method call, ensuring only one thread can be reading
# or writing at a time. This includes iteration methods like `#each`.
#
# @note `a += b` is **not** a **thread-safe** operation on
# `Concurrent::Set`. It reads Set `a`, then it creates a new `Concurrent::Set`
# which is the union of `a` and `b`, and then it writes the union back to `a`.
# The read and write are independent operations; they do not form a single
# atomic operation, so when two `+=` operations are executed concurrently,
# updates may be lost. Use `#merge` instead.
#
# @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set`
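# @example Updating a set in place (an editorial sketch; `#merge` here is
#   the mutating method inherited from the standard library `Set`)
#
#   set = Concurrent::Set.new([1, 2])
#   set.merge([2, 3]) # a single method call, no read-then-write race
#   set.to_a.sort     #=> [1, 2, 3]
#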
# @!macro internal_implementation_note
SetImplementation = case
when Concurrent.on_cruby?
# Because MRI never runs code in parallel, the existing
# non-thread-safe structures should usually work fine.
::Set
when Concurrent.on_jruby?
require 'jruby/synchronized'
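# JRuby::Synchronized causes every method call on the subclass to run
# while holding the instance's monitor.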
class JRubySet < ::Set
include JRuby::Synchronized
end
JRubySet
when Concurrent.on_rbx?
require 'monitor'
require 'concurrent/thread_safe/util/data_structures'
class RbxSet < ::Set
end
ThreadSafe::Util.make_synchronized_on_rbx Concurrent::RbxSet
RbxSet
when Concurrent.on_truffleruby?
require 'concurrent/thread_safe/util/data_structures'
class TruffleRubySet < ::Set
end
ThreadSafe::Util.make_synchronized_on_truffleruby Concurrent::TruffleRubySet
TruffleRubySet
else
warn 'Possibly unsupported Ruby implementation'
::Set
end
private_constant :SetImplementation
# @!macro concurrent_set
class Set < SetImplementation
end
end

View File

@ -0,0 +1,129 @@
require 'concurrent/synchronization/abstract_struct'
require 'concurrent/errors'
require 'concurrent/synchronization'
module Concurrent
# A thread-safe, write-once variation of Ruby's standard `Struct`.
# Each member can have its value set at most once, either at construction
# or any time thereafter. Attempting to assign a value to a member
# that has already been set will result in a `Concurrent::ImmutabilityError`.
#
# @see http://ruby-doc.org/core-2.2.0/Struct.html Ruby standard library `Struct`
# @see http://en.wikipedia.org/wiki/Final_(Java) Java `final` keyword
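# @example Write-once members (an editorial sketch; the struct and member
#   names are illustrative)
#
#   Point = Concurrent::SettableStruct.new(:x, :y)
#   point = Point.new(1) # :x is set at construction, :y is left unset
#   point.y = 2          # the first assignment succeeds
#   point.y = 3          # raises Concurrent::ImmutabilityError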
module SettableStruct
include Synchronization::AbstractStruct
# @!macro struct_values
def values
synchronize { ns_values }
end
alias_method :to_a, :values
# @!macro struct_values_at
def values_at(*indexes)
synchronize { ns_values_at(indexes) }
end
# @!macro struct_inspect
def inspect
synchronize { ns_inspect }
end
alias_method :to_s, :inspect
# @!macro struct_merge
def merge(other, &block)
synchronize { ns_merge(other, &block) }
end
# @!macro struct_to_h
def to_h
synchronize { ns_to_h }
end
# @!macro struct_get
def [](member)
synchronize { ns_get(member) }
end
# @!macro struct_equality
def ==(other)
synchronize { ns_equality(other) }
end
# @!macro struct_each
def each(&block)
return enum_for(:each) unless block_given?
synchronize { ns_each(&block) }
end
# @!macro struct_each_pair
def each_pair(&block)
return enum_for(:each_pair) unless block_given?
synchronize { ns_each_pair(&block) }
end
# @!macro struct_select
def select(&block)
return enum_for(:select) unless block_given?
synchronize { ns_select(&block) }
end
# @!macro struct_set
#
# @raise [Concurrent::ImmutabilityError] if the given member has already been set
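# Integer offsets are bounds-checked and assigned directly under the lock;
# any other key is dispatched to the generated `member=` writer.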
def []=(member, value)
if member.is_a? Integer
length = synchronize { @values.length }
if member >= length
raise IndexError.new("offset #{member} too large for struct(size:#{length})")
end
synchronize do
unless @values[member].nil?
raise Concurrent::ImmutabilityError.new('struct member has already been set')
end
@values[member] = value
end
else
send("#{member}=", value)
end
rescue NoMethodError
raise NameError.new("no member '#{member}' in struct")
end
# @!macro struct_new
def self.new(*args, &block)
clazz_name = nil
if args.length == 0
raise ArgumentError.new('wrong number of arguments (0 for 1+)')
elsif args.length > 0 && args.first.is_a?(String)
clazz_name = args.shift
end
FACTORY.define_struct(clazz_name, args, &block)
end
FACTORY = Class.new(Synchronization::LockableObject) do
def define_struct(name, members, &block)
synchronize do
clazz = Synchronization::AbstractStruct.define_struct_class(SettableStruct, Synchronization::LockableObject, name, members, &block)
members.each_with_index do |member, index|
clazz.send :remove_method, member if clazz.instance_methods.include? member
clazz.send(:define_method, member) do
synchronize { @values[index] }
end
clazz.send(:define_method, "#{member}=") do |value|
synchronize do
unless @values[index].nil?
raise Concurrent::ImmutabilityError.new('struct member has already been set')
end
@values[index] = value
end
end
end
clazz
end
end
end.new
private_constant :FACTORY
end
end

View File

@ -7,15 +7,14 @@ Concurrent.load_native_extensions
require 'concurrent/synchronization/mri_object'
require 'concurrent/synchronization/jruby_object'
require 'concurrent/synchronization/rbx_object'
-require 'concurrent/synchronization/truffle_object'
+require 'concurrent/synchronization/truffleruby_object'
require 'concurrent/synchronization/object'
require 'concurrent/synchronization/volatile'
require 'concurrent/synchronization/abstract_lockable_object'
-require 'concurrent/synchronization/mri_lockable_object'
+require 'concurrent/synchronization/mutex_lockable_object'
require 'concurrent/synchronization/jruby_lockable_object'
require 'concurrent/synchronization/rbx_lockable_object'
-require 'concurrent/synchronization/truffle_lockable_object'
require 'concurrent/synchronization/lockable_object'
@ -23,8 +22,8 @@ require 'concurrent/synchronization/condition'
require 'concurrent/synchronization/lock'
module Concurrent
-# {include:file:doc/synchronization.md}
-# {include:file:doc/synchronization-notes.md}
+# {include:file:docs-source/synchronization.md}
+# {include:file:docs-source/synchronization-notes.md}
module Synchronization
end
end

View File

@ -6,7 +6,7 @@ module Concurrent
protected
-# @!macro [attach] synchronization_object_method_synchronize
+# @!macro synchronization_object_method_synchronize
#
# @yield runs the block synchronized against this object,
# equivalent of java's `synchronize(this) {}`
@ -15,7 +15,7 @@ module Concurrent
raise NotImplementedError
end
-# @!macro [attach] synchronization_object_method_ns_wait_until
+# @!macro synchronization_object_method_ns_wait_until
#
# Wait until condition is met or timeout passes,
# protects against spurious wake-ups.
@ -45,7 +45,7 @@ module Concurrent
end
end
-# @!macro [attach] synchronization_object_method_ns_wait
+# @!macro synchronization_object_method_ns_wait
#
# Wait until another thread calls #signal or #broadcast,
# spurious wake-ups can happen.
@ -63,7 +63,7 @@ module Concurrent
raise NotImplementedError
end
-# @!macro [attach] synchronization_object_method_ns_signal
+# @!macro synchronization_object_method_ns_signal
#
# Signal one waiting thread.
# @return [self]
@ -78,7 +78,7 @@ module Concurrent
raise NotImplementedError
end
-# @!macro [attach] synchronization_object_method_ns_broadcast
+# @!macro synchronization_object_method_ns_broadcast
#
# Broadcast to all waiting threads.
# @return [self]

View File

@ -0,0 +1,159 @@
module Concurrent
module Synchronization
# @!visibility private
# @!macro internal_implementation_note
module AbstractStruct
# @!visibility private
def initialize(*values)
super()
ns_initialize(*values)
end
# @!macro struct_length
#
# Returns the number of struct members.
#
# @return [Fixnum] the number of struct members
def length
self.class::MEMBERS.length
end
alias_method :size, :length
# @!macro struct_members
#
# Returns the struct members as an array of symbols.
#
# @return [Array] the struct members as an array of symbols
def members
self.class::MEMBERS.dup
end
protected
# @!macro struct_values
#
# @!visibility private
def ns_values
@values.dup
end
# @!macro struct_values_at
#
# @!visibility private
def ns_values_at(indexes)
@values.values_at(*indexes)
end
# @!macro struct_to_h
#
# @!visibility private
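# Pairs the struct's MEMBERS with @values positionally to build the Hash.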
def ns_to_h
length.times.reduce({}){|memo, i| memo[self.class::MEMBERS[i]] = @values[i]; memo}
end
# @!macro struct_get
#
# @!visibility private
def ns_get(member)
if member.is_a? Integer
if member >= @values.length
raise IndexError.new("offset #{member} too large for struct(size:#{@values.length})")
end
@values[member]
else
send(member)
end
rescue NoMethodError
raise NameError.new("no member '#{member}' in struct")
end
# @!macro struct_equality
#
# @!visibility private
def ns_equality(other)
self.class == other.class && self.values == other.values
end
# @!macro struct_each
#
# @!visibility private
def ns_each
values.each{|value| yield value }
end
# @!macro struct_each_pair
#
# @!visibility private
def ns_each_pair
@values.length.times do |index|
yield self.class::MEMBERS[index], @values[index]
end
end
# @!macro struct_select
#
# @!visibility private
def ns_select
values.select{|value| yield value }
end
# @!macro struct_inspect
#
# @!visibility private
def ns_inspect
struct = pr_underscore(self.class.ancestors[1])
clazz = ((self.class.to_s =~ /^#<Class:/) == 0) ? '' : " #{self.class}"
"#<#{struct}#{clazz} #{ns_to_h}>"
end
# @!macro struct_merge
#
# @!visibility private
def ns_merge(other, &block)
self.class.new(*self.to_h.merge(other, &block).values)
end
# @!visibility private
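# Converts a class name such as Concurrent::SettableStruct into an
# underscored path like "concurrent/settable_struct".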
def pr_underscore(clazz)
word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229
word.gsub!(/::/, '/')
word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2')
word.gsub!(/([a-z\d])([A-Z])/,'\1_\2')
word.tr!("-", "_")
word.downcase!
word
end
# @!visibility private
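# Builds the concrete struct class: includes `parent`, freezes the member
# list into MEMBERS, registers the class under `name` when one is given,
# and defines a positional reader for each member.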
def self.define_struct_class(parent, base, name, members, &block)
clazz = Class.new(base || Object) do
include parent
self.const_set(:MEMBERS, members.collect{|member| member.to_s.to_sym}.freeze)
def ns_initialize(*values)
raise ArgumentError.new('struct size differs') if values.length > length
@values = values.fill(nil, values.length..length-1)
end
end
unless name.nil?
begin
parent.send :remove_const, name if parent.const_defined? name
parent.const_set(name, clazz)
clazz
rescue NameError
raise NameError.new("identifier #{name} needs to be constant")
end
end
members.each_with_index do |member, index|
clazz.send :remove_method, member if clazz.instance_methods.include? member
clazz.send(:define_method, member) do
@values[index]
end
end
clazz.class_exec(&block) unless block.nil?
clazz
end
end
end
end

View File

@ -5,18 +5,18 @@ module Concurrent
# @!macro internal_implementation_note
LockableObjectImplementation = case
when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3)
-MriMonitorLockableObject
+MonitorLockableObject
when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3)
-MriMutexLockableObject
+MutexLockableObject
when Concurrent.on_jruby?
JRubyLockableObject
when Concurrent.on_rbx?
RbxLockableObject
-when Concurrent.on_truffle?
-MriMutexLockableObject
+when Concurrent.on_truffleruby?
+MutexLockableObject
else
warn 'Possibly unsupported Ruby implementation'
-MriMonitorLockableObject
+MonitorLockableObject
end
private_constant :LockableObjectImplementation
@ -31,7 +31,7 @@ module Concurrent
# `Thread#sleep` and `Thread#wakeup` will work as expected but mixing `Synchronization::Object#wait` and
# `Thread#wakeup` will not work on all platforms.
#
-# @see {Event} implementation as an example of this class use
+# @see Event implementation as an example of this class use
#
# @example simple
# class AnClass < Synchronization::Object

Some files were not shown because too many files have changed in this diff.