Merge pull request #7022 from Homebrew/dependabot/bundler/Library/Homebrew/concurrent-ruby-1.1.6
build: bump concurrent-ruby from 1.1.5 to 1.1.6 in /Library/Homebrew
Commit: 708010e6fd
@@ -26,6 +26,10 @@ RSpec/LeakyConstantDeclaration:
   Enabled: false
 RSpec/MessageSpies:
   Enabled: false
+RSpec/RepeatedDescription:
+  Enabled: false
+RSpec/RepeatedExampleGroupDescription:
+  Enabled: false
 
 # TODO: try to reduce these (also requires fixing Homebrew/bundle)
 RSpec/ExampleLength:
@@ -8,7 +8,7 @@ GEM
       tzinfo (~> 1.1)
       zeitwerk (~> 2.2)
     ast (2.4.0)
-    concurrent-ruby (1.1.5)
+    concurrent-ruby (1.1.6)
     connection_pool (2.2.2)
     coveralls (0.8.23)
       json (>= 1.8, < 3)
@@ -89,7 +89,7 @@ GEM
       unicode-display_width (>= 1.4.0, < 1.7)
     rubocop-performance (1.5.2)
       rubocop (>= 0.71.0)
-    rubocop-rspec (1.37.1)
+    rubocop-rspec (1.38.0)
       rubocop (>= 0.68.1)
     ruby-macho (2.2.0)
     ruby-progressbar (1.10.1)
Library/Homebrew/vendor/bundle/bundler/setup.rb (vendored, 12 changed lines)
@@ -3,9 +3,9 @@ require 'rbconfig'
 ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
 ruby_version = RbConfig::CONFIG["ruby_version"]
 path = File.expand_path('..', __FILE__)
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.5/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.6/lib/concurrent-ruby"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/i18n-1.8.2/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.13.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.14.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thread_safe-0.3.6/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tzinfo-1.2.6/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/zeitwerk-2.2.2/lib"
@@ -19,7 +19,7 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/docile-1.3.2/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/simplecov-html-0.10.2/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/simplecov-0.16.1/lib"
 $:.unshift "#{path}/../../../../../../../../Library/Ruby/Gems/2.6.0/gems/sync-0.5.0/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tins-1.24.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tins-1.24.1/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/term-ansicolor-1.7.1/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thor-1.0.1/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/coveralls-0.8.23/lib"
@@ -45,7 +45,7 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/webrobots-0.1.2/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mechanize-2.7.6/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mustache-1.1.1/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel-1.19.1/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel_tests-2.30.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel_tests-2.31.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parser-2.7.0.2/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/plist-3.5.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rainbow-3.0.0/lib"
@@ -61,8 +61,8 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rspec-its-1.3.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rspec-retry-0.6.2/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rspec-wait-0.0.9/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-progressbar-1.10.1/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unicode-display_width-1.6.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unicode-display_width-1.6.1/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-0.79.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-performance-1.5.2/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-rspec-1.37.1/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-rspec-1.38.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-macho-2.2.0/lib"
@@ -1,97 +0,0 @@
-require 'logger'
-require 'concurrent/synchronization'
-
-module Concurrent
-
-  # Provides ability to add and remove handlers to be run at `Kernel#at_exit`, order is undefined.
-  # Each handler is executed at most once.
-  #
-  # @!visibility private
-  class AtExitImplementation < Synchronization::LockableObject
-    include Logger::Severity
-
-    def initialize(*args)
-      super()
-      synchronize { ns_initialize(*args) }
-    end
-
-    # Add a handler to be run at `Kernel#at_exit`
-    # @param [Object] handler_id optionally provide an id, if already present, handler is replaced
-    # @yield the handler
-    # @return id of the handler
-    def add(handler_id = nil, &handler)
-      id = handler_id || handler.object_id
-      synchronize { @handlers[id] = handler }
-      id
-    end
-
-    # Delete a handler by handler_id
-    # @return [true, false]
-    def delete(handler_id)
-      !!synchronize { @handlers.delete handler_id }
-    end
-
-    # Is handler with handler_id rpesent?
-    # @return [true, false]
-    def handler?(handler_id)
-      synchronize { @handlers.key? handler_id }
-    end
-
-    # @return copy of the handlers
-    def handlers
-      synchronize { @handlers }.clone
-    end
-
-    # install `Kernel#at_exit` callback to execute added handlers
-    def install
-      synchronize do
-        @installed ||= begin
-          at_exit { runner }
-          true
-        end
-        self
-      end
-    end
-
-    # Will it run during `Kernel#at_exit`
-    def enabled?
-      synchronize { @enabled }
-    end
-
-    # Configure if it runs during `Kernel#at_exit`
-    def enabled=(value)
-      synchronize { @enabled = value }
-    end
-
-    # run the handlers manually
-    # @return ids of the handlers
-    def run
-      handlers, _ = synchronize { handlers, @handlers = @handlers, {} }
-      handlers.each do |_, handler|
-        begin
-          handler.call
-        rescue => error
-          Concurrent.global_logger.call(ERROR, error)
-        end
-      end
-      handlers.keys
-    end
-
-    private
-
-    def ns_initialize(enabled = true)
-      @handlers = {}
-      @enabled = enabled
-    end
-
-    def runner
-      run if synchronize { @enabled }
-    end
-  end
-
-  private_constant :AtExitImplementation
-
-  # @see AtExitImplementation
-  # @!visibility private
-  AtExit = AtExitImplementation.new.install
-end
@@ -1,3 +0,0 @@
-module Concurrent
-  VERSION = '1.1.5'
-end
@@ -10,11 +10,11 @@ module Concurrent
   # or writing at a time. This includes iteration methods like `#each`.
   #
   # @note `a += b` is **not** a **thread-safe** operation on
   #   `Concurrent::Array`. It reads array `a`, then it creates new `Concurrent::Array`
   #   which is concatenation of `a` and `b`, then it writes the concatenation to `a`.
   #   The read and write are independent operations they do not form a single atomic
   #   operation therefore when two `+=` operations are executed concurrently updates
   #   may be lost. Use `#concat` instead.
   #
   # @see http://ruby-doc.org/core-2.2.0/Array.html Ruby standard library `Array`
 
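The `#concat` recommendation in the note above is easy to demonstrate. A minimal sketch of the race it describes, using only the documented `Concurrent::Array` API (variable names are illustrative):

```ruby
require 'concurrent'

shared = Concurrent::Array.new

threads = 4.times.map do |i|
  Thread.new do
    # NOT thread-safe: `shared += [i]` is a read-modify-write of the
    # variable, so concurrent updates may be lost.
    # Thread-safe: #concat mutates the array in a single call.
    shared.concat([i])
  end
end
threads.each(&:join)

shared.size # => 4 with #concat; may be less than 4 with `+=`
```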
@@ -18,7 +18,7 @@ require 'concurrent/synchronization'
 #     uncoordinated, *synchronous* change of individual values. Best used when
 #     the value will undergo frequent reads but only occasional, though complex,
 #     updates. Suitable when the result of an update must be known immediately.
-# * *{Concurrent::AtomicReference}:* A simple object reference that can be
+# * *{Concurrent::AtomicReference}:* A simple object reference that can be updated
 #     atomically. Updates are synchronous but fast. Best used when updates a
 #     simple set operations. Not suitable when updates are complex.
 # {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar
@@ -41,13 +41,13 @@ module Concurrent
   #
   #   Explicitly sets the value to true.
   #
-  #   @return [Boolean] true is value has changed, otherwise false
+  #   @return [Boolean] true if value has changed, otherwise false
 
   # @!macro atomic_boolean_method_make_false
   #
   #   Explicitly sets the value to false.
   #
-  #   @return [Boolean] true is value has changed, otherwise false
+  #   @return [Boolean] true if value has changed, otherwise false
 
   ###################################################################
 
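The corrected wording ("true if value has changed") matches the documented behavior of the public `AtomicBoolean` API:

```ruby
require 'concurrent'

flag = Concurrent::AtomicBoolean.new(false)

flag.make_true   # => true  (value changed from false to true)
flag.make_true   # => false (value was already true, nothing changed)
flag.make_false  # => true  (value changed back to false)
```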
@@ -1,6 +1,6 @@
+require 'concurrent/utility/engine'
 require 'concurrent/atomic/mutex_count_down_latch'
 require 'concurrent/atomic/java_count_down_latch'
-require 'concurrent/utility/engine'
 
 module Concurrent
 
@@ -32,12 +32,38 @@ module Concurrent
     FREE = []
     LOCK = Mutex.new
     ARRAYS = {} # used as a hash set
+    # noinspection RubyClassVariableUsageInspection
     @@next = 0
-    private_constant :FREE, :LOCK, :ARRAYS
+    QUEUE = Queue.new
+    THREAD = Thread.new do
+      while true
+        method, i = QUEUE.pop
+        case method
+        when :thread_local_finalizer
+          LOCK.synchronize do
+            FREE.push(i)
+            # The cost of GC'ing a TLV is linear in the number of threads using TLVs
+            # But that is natural! More threads means more storage is used per TLV
+            # So naturally more CPU time is required to free more storage
+            ARRAYS.each_value do |array|
+              array[i] = nil
+            end
+          end
+        when :thread_finalizer
+          LOCK.synchronize do
+            # The thread which used this thread-local array is now gone
+            # So don't hold onto a reference to the array (thus blocking GC)
+            ARRAYS.delete(i)
+          end
+        end
+      end
+    end
+
+    private_constant :FREE, :LOCK, :ARRAYS, :QUEUE, :THREAD
 
     # @!macro thread_local_var_method_get
     def value
-      if array = get_threadlocal_array
+      if (array = get_threadlocal_array)
         value = array[@index]
         if value.nil?
           default
@@ -57,10 +83,10 @@ module Concurrent
       # We could keep the thread-local arrays in a hash, keyed by Thread
       # But why? That would require locking
      # Using Ruby's built-in thread-local storage is faster
-      unless array = get_threadlocal_array(me)
+      unless (array = get_threadlocal_array(me))
         array = set_threadlocal_array([], me)
         LOCK.synchronize { ARRAYS[array.object_id] = array }
-        ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array))
+        ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array.object_id))
       end
       array[@index] = (value.nil? ? NULL : value)
       value
@@ -69,6 +95,7 @@ module Concurrent
     protected
 
     # @!visibility private
+    # noinspection RubyClassVariableUsageInspection
     def allocate_storage
       @index = LOCK.synchronize do
         FREE.pop || begin
@@ -77,37 +104,19 @@ module Concurrent
           result
         end
       end
-      ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index))
+      ObjectSpace.define_finalizer(self, self.class.thread_local_finalizer(@index))
     end
 
     # @!visibility private
-    def self.threadlocal_finalizer(index)
-      proc do
-        Thread.new do # avoid error: can't be called from trap context
-          LOCK.synchronize do
-            FREE.push(index)
-            # The cost of GC'ing a TLV is linear in the number of threads using TLVs
-            # But that is natural! More threads means more storage is used per TLV
-            # So naturally more CPU time is required to free more storage
-            ARRAYS.each_value do |array|
-              array[index] = nil
-            end
-          end
-        end
-      end
+    def self.thread_local_finalizer(index)
+      # avoid error: can't be called from trap context
+      proc { QUEUE.push [:thread_local_finalizer, index] }
     end
 
     # @!visibility private
-    def self.thread_finalizer(array)
-      proc do
-        Thread.new do # avoid error: can't be called from trap context
-          LOCK.synchronize do
-            # The thread which used this thread-local array is now gone
-            # So don't hold onto a reference to the array (thus blocking GC)
-            ARRAYS.delete(array.object_id)
-          end
-        end
-      end
+    def self.thread_finalizer(id)
+      # avoid error: can't be called from trap context
+      proc { QUEUE.push [:thread_finalizer, id] }
     end
 
     private
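The refactor above replaces per-finalizer `Thread.new` calls with a single queue-draining background thread: the finalizer proc only pushes a message, and the long-lived thread does the locking work. A standalone sketch of that pattern, independent of concurrent-ruby's internals (all names here are illustrative, not part of the gem):

```ruby
CLEANUP_QUEUE = Queue.new

# One long-lived worker drains the queue and does the real cleanup, so the
# finalizer proc stays trivial. Finalizer procs can run in contexts where
# taking a Mutex raises "can't be called from trap context", which is the
# error the upstream change works around; pushing onto a Queue is assumed
# safe there, as the upstream code does.
CLEANUP_THREAD = Thread.new do
  loop do
    index = CLEANUP_QUEUE.pop   # blocks until a finalizer enqueues work
    # reclaim slot `index` here, under whatever lock the registry needs
  end
end

def register_cleanup(object, index)
  # The proc must not capture `object` itself, or the object could never be GC'd.
  ObjectSpace.define_finalizer(object, proc { CLEANUP_QUEUE.push(index) })
end
```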
@@ -136,21 +145,22 @@ module Concurrent
     # This exists only for use in testing
     # @!visibility private
     def value_for(thread)
-      if array = get_threadlocal_array(thread)
+      if (array = get_threadlocal_array(thread))
         value = array[@index]
         if value.nil?
-          default_for(thread)
+          get_default
         elsif value.equal?(NULL)
           nil
         else
           value
         end
       else
-        default_for(thread)
+        get_default
       end
     end
 
-    def default_for(thread)
+    # @!visibility private
+    def get_default
       if @default_block
         raise "Cannot use default_for with default block"
       else
@@ -1,6 +1,6 @@
+require 'concurrent/utility/engine'
 require 'concurrent/atomic/ruby_thread_local_var'
 require 'concurrent/atomic/java_thread_local_var'
-require 'concurrent/utility/engine'
 
 module Concurrent
 
@@ -1,6 +1,6 @@
+require 'concurrent/utility/engine'
 require 'concurrent/collection/java_non_concurrent_priority_queue'
 require 'concurrent/collection/ruby_non_concurrent_priority_queue'
-require 'concurrent/utility/engine'
 
 module Concurrent
   module Collection
@@ -37,8 +37,8 @@ module Concurrent
     #   returning data to the caller (dereferencing).
     #
     # @note Most classes that include this module will call `#set_deref_options`
     #   from within the constructor, thus allowing these options to be set at
     #   object creation.
     #
     # @param [Hash] opts the options defining dereference behavior.
     # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
@@ -3,13 +3,14 @@ require 'concurrent/delay'
 require 'concurrent/errors'
 require 'concurrent/atomic/atomic_reference'
 require 'concurrent/concern/logging'
+require 'concurrent/concern/deprecation'
 require 'concurrent/executor/immediate_executor'
 require 'concurrent/executor/cached_thread_pool'
-require 'concurrent/utility/at_exit'
 require 'concurrent/utility/processor_counter'
 
 module Concurrent
   extend Concern::Logging
+  extend Concern::Deprecation
 
   autoload :Options, 'concurrent/options'
   autoload :TimerSet, 'concurrent/executor/timer_set'
@@ -97,15 +98,15 @@ module Concurrent
   end
 
   # @!visibility private
-  GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor(auto_terminate: true) }
+  GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor }
   private_constant :GLOBAL_FAST_EXECUTOR
 
   # @!visibility private
-  GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor(auto_terminate: true) }
+  GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor }
   private_constant :GLOBAL_IO_EXECUTOR
 
   # @!visibility private
-  GLOBAL_TIMER_SET = Delay.new { TimerSet.new(auto_terminate: true) }
+  GLOBAL_TIMER_SET = Delay.new { TimerSet.new }
   private_constant :GLOBAL_TIMER_SET
 
   # @!visibility private
@@ -115,7 +116,7 @@ module Concurrent
   # Disables AtExit handlers including pool auto-termination handlers.
   # When disabled it will be the application programmer's responsibility
   # to ensure that the handlers are shutdown properly prior to application
-  # exit by calling {AtExit.run} method.
+  # exit by calling `AtExit.run` method.
   #
   # @note this option should be needed only because of `at_exit` ordering
   #   issues which may arise when running some of the testing frameworks.
@@ -125,9 +126,10 @@ module Concurrent
   # @note This method should *never* be called
   #   from within a gem. It should *only* be used from within the main
   #   application and even then it should be used only when necessary.
-  # @see AtExit
+  # @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841.
+  #
   def self.disable_at_exit_handlers!
-    AtExit.enabled = false
+    deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841."
   end
 
   # Global thread pool optimized for short, fast *operations*.
@@ -171,14 +173,16 @@ module Concurrent
       auto_terminate: opts.fetch(:auto_terminate, true),
       idletime: 60, # 1 minute
       max_queue: 0, # unlimited
-      fallback_policy: :abort # shouldn't matter -- 0 max queue
+      fallback_policy: :abort, # shouldn't matter -- 0 max queue
+      name: "fast"
     )
   end
 
   def self.new_io_executor(opts = {})
     CachedThreadPool.new(
       auto_terminate: opts.fetch(:auto_terminate, true),
-      fallback_policy: :abort # shouldn't matter -- 0 max queue
+      fallback_policy: :abort, # shouldn't matter -- 0 max queue
+      name: "io"
     )
   end
 end
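With the `name:` values added above, the global executors advertise themselves in their string representations; a small check, assuming the public `Concurrent.global_io_executor` / `Concurrent.global_fast_executor` accessors:

```ruby
require 'concurrent'

Concurrent.global_io_executor.to_s   # ends with "name: io>"
Concurrent.global_fast_executor.to_s # ends with "name: fast>"
```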
@@ -1,7 +1,7 @@
 require 'concurrent/errors'
+require 'concurrent/concern/deprecation'
 require 'concurrent/executor/executor_service'
 require 'concurrent/synchronization'
-require 'concurrent/utility/at_exit'
 
 module Concurrent
 
@@ -9,6 +9,7 @@ module Concurrent
   # @!visibility private
   class AbstractExecutorService < Synchronization::LockableObject
     include ExecutorService
+    include Concern::Deprecation
 
     # The set of possible fallback policies that may be set at thread pool creation.
     FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze
@@ -16,10 +17,20 @@ module Concurrent
     # @!macro executor_service_attr_reader_fallback_policy
     attr_reader :fallback_policy
 
+    attr_reader :name
+
     # Create a new thread pool.
-    def initialize(*args, &block)
+    def initialize(opts = {}, &block)
       super(&nil)
-      synchronize { ns_initialize(*args, &block) }
+      synchronize do
+        @auto_terminate = opts.fetch(:auto_terminate, true)
+        @name = opts.fetch(:name) if opts.key?(:name)
+        ns_initialize(opts, &block)
+      end
+    end
+
+    def to_s
+      name ? "#{super[0..-2]} name: #{name}>" : super
     end
 
     # @!macro executor_service_method_shutdown
@@ -54,12 +65,12 @@ module Concurrent
 
     # @!macro executor_service_method_auto_terminate_question
     def auto_terminate?
-      synchronize { ns_auto_terminate? }
+      synchronize { @auto_terminate }
     end
 
     # @!macro executor_service_method_auto_terminate_setter
     def auto_terminate=(value)
-      synchronize { self.ns_auto_terminate = value }
+      deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized."
     end
 
     private
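In practice this means auto-termination is now fixed at construction time; a short usage sketch of the difference:

```ruby
require 'concurrent'

# 1.1.6: pass the option when the executor is created ...
pool = Concurrent::FixedThreadPool.new(5, auto_terminate: false)
pool.auto_terminate?   # => false

# ... the setter still exists but only emits a deprecation warning
pool.auto_terminate = true   # no effect in 1.1.6
```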
@@ -110,25 +121,8 @@ module Concurrent
     end
 
     def ns_auto_terminate?
-      !!@auto_terminate
+      @auto_terminate
     end
 
-    def ns_auto_terminate=(value)
-      case value
-      when true
-        AtExit.add(self) { terminate_at_exit }
-        @auto_terminate = true
-      when false
-        AtExit.delete(self)
-        @auto_terminate = false
-      else
-        raise ArgumentError
-      end
-    end
-
-    def terminate_at_exit
-      kill # TODO be gentle first
-      wait_for_termination(10)
-    end
   end
 end
@@ -37,7 +37,7 @@ module Concurrent
     #
     # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool--
     def initialize(opts = {})
       defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT }
       overrides = { min_threads: 0,
                     max_threads: DEFAULT_MAX_POOL_SIZE,
                     max_queue: DEFAULT_MAX_QUEUE_SIZE }
@@ -51,11 +51,11 @@ module Concurrent
     def ns_initialize(opts)
       super(opts)
       if Concurrent.on_jruby?
         @max_queue = 0
-        @executor = java.util.concurrent.Executors.newCachedThreadPool
+        @executor = java.util.concurrent.Executors.newCachedThreadPool(
+            DaemonThreadFactory.new(ns_auto_terminate?))
         @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
         @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
-        self.auto_terminate = opts.fetch(:auto_terminate, true)
       end
     end
   end
 end
@@ -111,10 +111,10 @@ module Concurrent
 
   # @!macro executor_service_method_auto_terminate_setter
   #
+  #
   #   Set the auto-terminate behavior for this executor.
-  #
+  #   @deprecated Has no effect
   #   @param [Boolean] value The new auto-terminate value to set for this executor.
-  #
   #   @return [Boolean] `true` when auto-termination is enabled else `false`.
 
   ###################################################################
@@ -115,12 +115,13 @@ module Concurrent
   # Thread pools support several configuration options:
   #
   # * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
+  # * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and
+  #   a `<name>-worker-<id>` name is given to its threads if supported by used Ruby
+  #   implementation. `<id>` is uniq for each thread.
   # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
   #   any one time. When the queue size reaches `max_queue` and no new threads can be created,
   #   subsequent tasks will be rejected in accordance with the configured `fallback_policy`.
-  # * `auto_terminate`: When true (default) an `at_exit` handler will be registered which
-  #   will stop the thread pool when the application exits. See below for more information
-  #   on shutting down thread pools.
+  # * `auto_terminate`: When true (default), the threads started will be marked as daemon.
   # * `fallback_policy`: The policy defining how rejected tasks are handled.
   #
   # Three fallback policies are supported:
@@ -145,16 +146,12 @@ module Concurrent
   #
   # On some runtime platforms (most notably the JVM) the application will not
   # exit until all thread pools have been shutdown. To prevent applications from
-  # "hanging" on exit all thread pools include an `at_exit` handler that will
-  # stop the thread pool when the application exits. This handler uses a brute
-  # force method to stop the pool and makes no guarantees regarding resources being
-  # used by any tasks still running. Registration of this `at_exit` handler can be
-  # prevented by setting the thread pool's constructor `:auto_terminate` option to
-  # `false` when the thread pool is created. All thread pools support this option.
+  # "hanging" on exit, all threads can be marked as daemon according to the
+  # `:auto_terminate` option.
   #
   # ```ruby
-  # pool1 = Concurrent::FixedThreadPool.new(5) # an `at_exit` handler will be registered
-  # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # prevent `at_exit` handler registration
+  # pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon
+  # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon
   # ```
   #
   # @note Failure to properly shutdown a thread pool can lead to unpredictable results.
@@ -163,7 +160,7 @@ module Concurrent
   # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools
   # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class
   # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface
-  # @see http://ruby-doc.org//core-2.2.0/Kernel.html#method-i-at_exit Kernel#at_exit
+  # @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean-
 
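The `name` option documented above can be exercised like this; a sketch based only on the behavior described in this diff (thread naming applies only where the Ruby implementation supports `Thread#name=`):

```ruby
require 'concurrent'

pool = Concurrent::FixedThreadPool.new(2, name: "scanner")

pool.to_s   # inspect-style string ending in "name: scanner>"
pool.post { Thread.current.name }   # workers are named "scanner-worker-<id>"

pool.shutdown
pool.wait_for_termination
```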
@@ -18,10 +18,6 @@ if Concurrent.on_jruby?
       }.freeze
       private_constant :FALLBACK_POLICY_CLASSES
 
-      def initialize(*args, &block)
-        super
-      end
-
       def post(*args, &task)
         raise ArgumentError.new('no block given') unless block_given?
         return handle_fallback(*args, &task) unless running?
@@ -42,7 +38,6 @@ if Concurrent.on_jruby?
 
       def shutdown
         synchronize do
-          self.ns_auto_terminate = false
           @executor.shutdown
           nil
         end
@@ -50,7 +45,6 @@ if Concurrent.on_jruby?
 
       def kill
         synchronize do
-          self.ns_auto_terminate = false
           @executor.shutdownNow
           nil
         end
@@ -87,5 +81,23 @@ if Concurrent.on_jruby?
       end
       private_constant :Job
     end
+
+    class DaemonThreadFactory
+      # hide include from YARD
+      send :include, java.util.concurrent.ThreadFactory
+
+      def initialize(daemonize = true)
+        @daemonize = daemonize
+      end
+
+      def newThread(runnable)
+        thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable)
+        thread.setDaemon(@daemonize)
+        return thread
+      end
+    end
+
+    private_constant :DaemonThreadFactory
+
   end
 end
@@ -17,12 +17,13 @@ if Concurrent.on_jruby?
       end
 
       private
 
       def ns_initialize(opts)
-        @executor = java.util.concurrent.Executors.newSingleThreadExecutor
+        @executor = java.util.concurrent.Executors.newSingleThreadExecutor(
+            DaemonThreadFactory.new(ns_auto_terminate?)
+        )
         @fallback_policy = opts.fetch(:fallback_policy, :discard)
         raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy)
-        self.auto_terminate = opts.fetch(:auto_terminate, true)
       end
     end
   end
@@ -114,10 +114,11 @@ if Concurrent.on_jruby?
           idletime,
           java.util.concurrent.TimeUnit::SECONDS,
           queue,
+          DaemonThreadFactory.new(ns_auto_terminate?),
           FALLBACK_POLICY_CLASSES[@fallback_policy].new)
 
-        self.auto_terminate = opts.fetch(:auto_terminate, true)
       end
     end
 
   end
 end
@@ -27,7 +27,6 @@ module Concurrent
     def shutdown
       synchronize do
         break unless running?
-        self.ns_auto_terminate = false
         stop_event.set
         ns_shutdown_execution
       end
@@ -37,7 +36,6 @@ module Concurrent
     def kill
       synchronize do
         break if shutdown?
-        self.ns_auto_terminate = false
         stop_event.set
         ns_kill_execution
         stopped_event.set
@@ -15,7 +15,6 @@ module Concurrent
         max_queue: 0,
         idletime: DEFAULT_THREAD_IDLETIMEOUT,
         fallback_policy: opts.fetch(:fallback_policy, :discard),
-        auto_terminate: opts.fetch(:auto_terminate, true)
       )
     end
   end
@@ -122,8 +122,6 @@ module Concurrent
       raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
       raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length
 
-      self.auto_terminate = opts.fetch(:auto_terminate, true)
-
       @pool = [] # all workers
       @ready = [] # used as a stash (most idle worker is at the start)
       @queue = [] # used as queue
@@ -131,6 +129,7 @@ module Concurrent
       @scheduled_task_count = 0
       @completed_task_count = 0
       @largest_length = 0
+      @workers_counter = 0
       @ruby_pid = $$ # detects if Ruby has forked
 
       @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
@@ -224,7 +223,8 @@ module Concurrent
     def ns_add_busy_worker
       return if @pool.size >= @max_length
 
-      @pool << (worker = Worker.new(self))
+      @workers_counter += 1
+      @pool << (worker = Worker.new(self, @workers_counter))
       @largest_length = @pool.length if @pool.length > @largest_length
       worker
     end
@@ -284,6 +284,7 @@ module Concurrent
         @scheduled_task_count = 0
         @completed_task_count = 0
         @largest_length = 0
+        @workers_counter = 0
         @ruby_pid = $$
       end
     end
@@ -292,11 +293,15 @@ module Concurrent
     class Worker
       include Concern::Logging
 
-      def initialize(pool)
+      def initialize(pool, id)
         # instance variables accessed only under pool's lock so no need to sync here again
         @queue = Queue.new
         @pool = pool
         @thread = create_worker @queue, pool, pool.idletime
+
+        if @thread.respond_to?(:name=)
+          @thread.name = [pool.name, 'worker', id].compact.join('-')
+        end
       end
 
       def <<(message)
@@ -91,7 +91,7 @@ module Concurrent
 
     private
 
-    def ns_initialize
+    def ns_initialize(*args)
       @running = Concurrent::AtomicBoolean.new(true)
       @stopped = Concurrent::Event.new
       @count = Concurrent::AtomicFixnum.new(0)
@@ -1,3 +1,4 @@
+require 'concurrent/utility/engine'
 require 'concurrent/executor/ruby_single_thread_executor'
 
 module Concurrent
@@ -77,7 +77,6 @@ module Concurrent
       @timer_executor = SingleThreadExecutor.new
       @condition = Event.new
       @ruby_pid = $$ # detects if Ruby has forked
-      self.auto_terminate = opts.fetch(:auto_terminate, true)
     end
 
     # Post the task to the internal queue.
@@ -70,6 +70,14 @@ module Concurrent
       ns_select(&block)
     end
 
+    private
+
+    # @!visibility private
+    def initialize_copy(original)
+      super(original)
+      ns_initialize_copy
+    end
+
     # @!macro struct_new
     def self.new(*args, &block)
       clazz_name = nil
@@ -197,7 +197,7 @@ module Concurrent
   # Insert value into map with key if key is absent in one atomic step.
   # @param [Object] key
   # @param [Object] value
-  # @return [Object, nil] the value or nil when key was present
+  # @return [Object, nil] the previous value when key was present or nil when there was no key
   def put_if_absent(key, value)
     computed = false
     result = compute_if_absent(key) do
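The clarified return value reads naturally in code; a short sketch using `Concurrent::Map`:

```ruby
require 'concurrent'

map = Concurrent::Map.new

map.put_if_absent(:host, "localhost") # => nil         (no previous value; key inserted)
map.put_if_absent(:host, "0.0.0.0")   # => "localhost" (key present; previous value returned, map unchanged)
map[:host]                            # => "localhost"
```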
@@ -196,6 +196,16 @@ module Concurrent
       raise NameError.new("no member '#{member}' in struct")
     end
 
+    private
+
+    # @!visibility private
+    def initialize_copy(original)
+      synchronize do
+        super(original)
+        ns_initialize_copy
+      end
+    end
+
     # @!macro struct_new
     def self.new(*args, &block)
       clazz_name = nil
@@ -11,11 +11,11 @@ module Concurrent
   # or writing at a time. This includes iteration methods like `#each`.
   #
   # @note `a += b` is **not** a **thread-safe** operation on
   #   `Concurrent::Set`. It reads Set `a`, then it creates new `Concurrent::Set`
   #   which is union of `a` and `b`, then it writes the union to `a`.
   #   The read and write are independent operations they do not form a single atomic
   #   operation therefore when two `+=` operations are executed concurrently updates
   #   may be lost. Use `#merge` instead.
   #
   # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set`
 
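As with `Concurrent::Array` above, the safe counterpart to `+=` is the mutating call named in the note; a one-line sketch:

```ruby
shared = Concurrent::Set.new
# shared += [:a]          # read-modify-write; concurrent updates may be lost
shared.merge([:a, :b])    # mutates the set in a single call
```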
@@ -91,6 +91,16 @@ module Concurrent
       raise NameError.new("no member '#{member}' in struct")
     end
 
+    private
+
+    # @!visibility private
+    def initialize_copy(original)
+      synchronize do
+        super(original)
+        ns_initialize_copy
+      end
+    end
+
     # @!macro struct_new
     def self.new(*args, &block)
       clazz_name = nil
@@ -115,6 +115,17 @@ module Concurrent
       self.class.new(*self.to_h.merge(other, &block).values)
     end
 
+    # @!visibility private
+    def ns_initialize_copy
+      @values = @values.map do |val|
+        begin
+          val.clone
+        rescue TypeError
+          val
+        end
+      end
+    end
+
     # @!visibility private
     def pr_underscore(clazz)
       word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229
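The `initialize_copy`/`ns_initialize_copy` additions above mean `#dup` and `#clone` of a synchronized struct now clone the member values as well. A hedged sketch of the intended effect (the `Point` struct is illustrative):

```ruby
require 'concurrent'

Point = Concurrent::MutableStruct.new(:x, :y)

a = Point.new([1], [2])   # members hold mutable objects
b = a.dup                 # the copy gets cloned member values where clone is supported

b.x << 99
a.x                       # => [1]  (unchanged; the copy no longer shares state)
```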
Some files were not shown because too many files have changed in this diff.