Merge pull request #7022 from Homebrew/dependabot/bundler/Library/Homebrew/concurrent-ruby-1.1.6
build: bump concurrent-ruby from 1.1.5 to 1.1.6 in /Library/Homebrew
commit 708010e6fd
@@ -26,6 +26,10 @@ RSpec/LeakyConstantDeclaration:
  Enabled: false
RSpec/MessageSpies:
  Enabled: false
+RSpec/RepeatedDescription:
+  Enabled: false
+RSpec/RepeatedExampleGroupDescription:
+  Enabled: false

# TODO: try to reduce these (also requires fixing Homebrew/bundle)
RSpec/ExampleLength:
@@ -8,7 +8,7 @@ GEM
      tzinfo (~> 1.1)
      zeitwerk (~> 2.2)
    ast (2.4.0)
-    concurrent-ruby (1.1.5)
+    concurrent-ruby (1.1.6)
    connection_pool (2.2.2)
    coveralls (0.8.23)
      json (>= 1.8, < 3)
@@ -89,7 +89,7 @@ GEM
      unicode-display_width (>= 1.4.0, < 1.7)
    rubocop-performance (1.5.2)
      rubocop (>= 0.71.0)
-    rubocop-rspec (1.37.1)
+    rubocop-rspec (1.38.0)
      rubocop (>= 0.68.1)
    ruby-macho (2.2.0)
    ruby-progressbar (1.10.1)
Library/Homebrew/vendor/bundle/bundler/setup.rb (vendored, 12 changed lines)
@@ -3,9 +3,9 @@ require 'rbconfig'
ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
ruby_version = RbConfig::CONFIG["ruby_version"]
path = File.expand_path('..', __FILE__)
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.5/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.6/lib/concurrent-ruby"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/i18n-1.8.2/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.13.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.14.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thread_safe-0.3.6/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tzinfo-1.2.6/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/zeitwerk-2.2.2/lib"
@@ -19,7 +19,7 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/docile-1.3.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/simplecov-html-0.10.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/simplecov-0.16.1/lib"
$:.unshift "#{path}/../../../../../../../../Library/Ruby/Gems/2.6.0/gems/sync-0.5.0/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tins-1.24.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tins-1.24.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/term-ansicolor-1.7.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thor-1.0.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/coveralls-0.8.23/lib"
@@ -45,7 +45,7 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/webrobots-0.1.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mechanize-2.7.6/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mustache-1.1.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel-1.19.1/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel_tests-2.30.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel_tests-2.31.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parser-2.7.0.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/plist-3.5.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rainbow-3.0.0/lib"
@@ -61,8 +61,8 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rspec-its-1.3.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rspec-retry-0.6.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rspec-wait-0.0.9/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-progressbar-1.10.1/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unicode-display_width-1.6.0/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unicode-display_width-1.6.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-0.79.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-performance-1.5.2/lib"
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-rspec-1.37.1/lib"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-rspec-1.38.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-macho-2.2.0/lib"
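A minimal sketch (not part of this diff) of what each generated `$:.unshift` line above does: Bundler's standalone `setup.rb` prepends every vendored gem's `lib` directory to Ruby's load path. The concurrent-ruby 1.1.6 entry now ends in `lib/concurrent-ruby` because the gem moved its code under that subdirectory; the concrete `ruby/2.6.0` path segment below is only an assumption for illustration.

```ruby
# Illustrative only; the ruby/2.6.0 segment is an assumed example path.
path = File.expand_path('..', __FILE__)
$:.unshift "#{path}/../ruby/2.6.0/gems/concurrent-ruby-1.1.6/lib/concurrent-ruby"

# require 'concurrent'  # would now resolve against the vendored 1.1.6 copy
```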
@@ -1,97 +0,0 @@
require 'logger'
require 'concurrent/synchronization'

module Concurrent

  # Provides ability to add and remove handlers to be run at `Kernel#at_exit`, order is undefined.
  # Each handler is executed at most once.
  #
  # @!visibility private
  class AtExitImplementation < Synchronization::LockableObject
    include Logger::Severity

    def initialize(*args)
      super()
      synchronize { ns_initialize(*args) }
    end

    # Add a handler to be run at `Kernel#at_exit`
    # @param [Object] handler_id optionally provide an id, if already present, handler is replaced
    # @yield the handler
    # @return id of the handler
    def add(handler_id = nil, &handler)
      id = handler_id || handler.object_id
      synchronize { @handlers[id] = handler }
      id
    end

    # Delete a handler by handler_id
    # @return [true, false]
    def delete(handler_id)
      !!synchronize { @handlers.delete handler_id }
    end

    # Is handler with handler_id present?
    # @return [true, false]
    def handler?(handler_id)
      synchronize { @handlers.key? handler_id }
    end

    # @return copy of the handlers
    def handlers
      synchronize { @handlers }.clone
    end

    # install `Kernel#at_exit` callback to execute added handlers
    def install
      synchronize do
        @installed ||= begin
          at_exit { runner }
          true
        end
        self
      end
    end

    # Will it run during `Kernel#at_exit`
    def enabled?
      synchronize { @enabled }
    end

    # Configure if it runs during `Kernel#at_exit`
    def enabled=(value)
      synchronize { @enabled = value }
    end

    # run the handlers manually
    # @return ids of the handlers
    def run
      handlers, _ = synchronize { handlers, @handlers = @handlers, {} }
      handlers.each do |_, handler|
        begin
          handler.call
        rescue => error
          Concurrent.global_logger.call(ERROR, error)
        end
      end
      handlers.keys
    end

    private

    def ns_initialize(enabled = true)
      @handlers = {}
      @enabled = enabled
    end

    def runner
      run if synchronize { @enabled }
    end
  end

  private_constant :AtExitImplementation

  # @see AtExitImplementation
  # @!visibility private
  AtExit = AtExitImplementation.new.install
end
@@ -1,3 +0,0 @@
module Concurrent
  VERSION = '1.1.5'
end
@@ -18,7 +18,7 @@ require 'concurrent/synchronization'
#   uncoordinated, *synchronous* change of individual values. Best used when
#   the value will undergo frequent reads but only occasional, though complex,
#   updates. Suitable when the result of an update must be known immediately.
-# * *{Concurrent::AtomicReference}:* A simple object reference that can be
+# * *{Concurrent::AtomicReference}:* A simple object reference that can be updated
#   atomically. Updates are synchronous but fast. Best used when updates a
#   simple set operations. Not suitable when updates are complex.
#   {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar
@@ -41,13 +41,13 @@ module Concurrent
  #
  #   Explicitly sets the value to true.
  #
-  #   @return [Boolean] true is value has changed, otherwise false
+  #   @return [Boolean] true if value has changed, otherwise false

  # @!macro atomic_boolean_method_make_false
  #
  #   Explicitly sets the value to false.
  #
-  #   @return [Boolean] true is value has changed, otherwise false
+  #   @return [Boolean] true if value has changed, otherwise false

  ###################################################################

@@ -1,6 +1,6 @@
require 'concurrent/utility/engine'
require 'concurrent/atomic/mutex_count_down_latch'
require 'concurrent/atomic/java_count_down_latch'
require 'concurrent/utility/engine'

module Concurrent

@@ -32,12 +32,38 @@ module Concurrent
    FREE = []
    LOCK = Mutex.new
    ARRAYS = {} # used as a hash set
    # noinspection RubyClassVariableUsageInspection
    @@next = 0
-    private_constant :FREE, :LOCK, :ARRAYS
+    QUEUE = Queue.new
+    THREAD = Thread.new do
+      while true
+        method, i = QUEUE.pop
+        case method
+        when :thread_local_finalizer
+          LOCK.synchronize do
+            FREE.push(i)
+            # The cost of GC'ing a TLV is linear in the number of threads using TLVs
+            # But that is natural! More threads means more storage is used per TLV
+            # So naturally more CPU time is required to free more storage
+            ARRAYS.each_value do |array|
+              array[i] = nil
+            end
+          end
+        when :thread_finalizer
+          LOCK.synchronize do
+            # The thread which used this thread-local array is now gone
+            # So don't hold onto a reference to the array (thus blocking GC)
+            ARRAYS.delete(i)
+          end
+        end
+      end
+    end
+
+    private_constant :FREE, :LOCK, :ARRAYS, :QUEUE, :THREAD

    # @!macro thread_local_var_method_get
    def value
-      if array = get_threadlocal_array
+      if (array = get_threadlocal_array)
        value = array[@index]
        if value.nil?
          default
@@ -57,10 +83,10 @@ module Concurrent
      # We could keep the thread-local arrays in a hash, keyed by Thread
      # But why? That would require locking
      # Using Ruby's built-in thread-local storage is faster
-      unless array = get_threadlocal_array(me)
+      unless (array = get_threadlocal_array(me))
        array = set_threadlocal_array([], me)
        LOCK.synchronize { ARRAYS[array.object_id] = array }
-        ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array))
+        ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array.object_id))
      end
      array[@index] = (value.nil? ? NULL : value)
      value
@@ -69,6 +95,7 @@ module Concurrent
    protected

    # @!visibility private
    # noinspection RubyClassVariableUsageInspection
    def allocate_storage
      @index = LOCK.synchronize do
        FREE.pop || begin
@@ -77,37 +104,19 @@
          result
        end
      end
-      ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index))
+      ObjectSpace.define_finalizer(self, self.class.thread_local_finalizer(@index))
    end

    # @!visibility private
-    def self.threadlocal_finalizer(index)
-      proc do
-        Thread.new do # avoid error: can't be called from trap context
-          LOCK.synchronize do
-            FREE.push(index)
-            # The cost of GC'ing a TLV is linear in the number of threads using TLVs
-            # But that is natural! More threads means more storage is used per TLV
-            # So naturally more CPU time is required to free more storage
-            ARRAYS.each_value do |array|
-              array[index] = nil
-            end
-          end
-        end
-      end
-    end
+    def self.thread_local_finalizer(index)
+      # avoid error: can't be called from trap context
+      proc { QUEUE.push [:thread_local_finalizer, index] }
+    end

    # @!visibility private
-    def self.thread_finalizer(array)
-      proc do
-        Thread.new do # avoid error: can't be called from trap context
-          LOCK.synchronize do
-            # The thread which used this thread-local array is now gone
-            # So don't hold onto a reference to the array (thus blocking GC)
-            ARRAYS.delete(array.object_id)
-          end
-        end
-      end
-    end
+    def self.thread_finalizer(id)
+      # avoid error: can't be called from trap context
+      proc { QUEUE.push [:thread_finalizer, id] }
+    end

    private
@@ -136,21 +145,22 @@ module Concurrent
    # This exists only for use in testing
    # @!visibility private
    def value_for(thread)
-      if array = get_threadlocal_array(thread)
+      if (array = get_threadlocal_array(thread))
        value = array[@index]
        if value.nil?
-          default_for(thread)
+          get_default
        elsif value.equal?(NULL)
          nil
        else
          value
        end
      else
-        default_for(thread)
+        get_default
      end
    end

-    def default_for(thread)
+    # @!visibility private
+    def get_default
      if @default_block
        raise "Cannot use default_for with default block"
      else
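The hunks above replace the old per-finalization `Thread.new` calls with a single long-lived worker thread fed by a `Queue`: finalizer procs no longer lock or spawn threads themselves, they only enqueue a message. A generic sketch of that pattern (not the gem's own code; all names below are illustrative):

```ruby
# One background thread owns all cleanup work.
CLEANUP_QUEUE = Queue.new

CLEANUP_THREAD = Thread.new do
  loop do
    action, id = CLEANUP_QUEUE.pop
    # ...acquire any locks and release the resources associated with `id` here...
    puts "cleanup #{action} for #{id}"
  end
end

def register_cleanup(obj, id)
  # The finalizer runs in a restricted context, so it only pushes a message.
  # It must not capture `obj` itself, or the object would never be collected.
  ObjectSpace.define_finalizer(obj, proc { CLEANUP_QUEUE.push([:finalize, id]) })
end

register_cleanup(Object.new, 42)
GC.start
sleep 0.1 # best effort: give the cleanup thread a chance to run
```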
@@ -1,6 +1,6 @@
require 'concurrent/utility/engine'
require 'concurrent/atomic/ruby_thread_local_var'
require 'concurrent/atomic/java_thread_local_var'
require 'concurrent/utility/engine'

module Concurrent

@@ -1,6 +1,6 @@
require 'concurrent/utility/engine'
require 'concurrent/collection/java_non_concurrent_priority_queue'
require 'concurrent/collection/ruby_non_concurrent_priority_queue'
require 'concurrent/utility/engine'

module Concurrent
  module Collection
Binary file not shown.
@@ -3,13 +3,14 @@ require 'concurrent/delay'
require 'concurrent/errors'
require 'concurrent/atomic/atomic_reference'
require 'concurrent/concern/logging'
+require 'concurrent/concern/deprecation'
require 'concurrent/executor/immediate_executor'
require 'concurrent/executor/cached_thread_pool'
-require 'concurrent/utility/at_exit'
require 'concurrent/utility/processor_counter'

module Concurrent
  extend Concern::Logging
+  extend Concern::Deprecation

  autoload :Options, 'concurrent/options'
  autoload :TimerSet, 'concurrent/executor/timer_set'
@@ -97,15 +98,15 @@ module Concurrent
  end

  # @!visibility private
-  GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor(auto_terminate: true) }
+  GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor }
  private_constant :GLOBAL_FAST_EXECUTOR

  # @!visibility private
-  GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor(auto_terminate: true) }
+  GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor }
  private_constant :GLOBAL_IO_EXECUTOR

  # @!visibility private
-  GLOBAL_TIMER_SET = Delay.new { TimerSet.new(auto_terminate: true) }
+  GLOBAL_TIMER_SET = Delay.new { TimerSet.new }
  private_constant :GLOBAL_TIMER_SET

  # @!visibility private
@@ -115,7 +116,7 @@ module Concurrent
  # Disables AtExit handlers including pool auto-termination handlers.
  # When disabled it will be the application programmer's responsibility
  # to ensure that the handlers are shutdown properly prior to application
-  # exit by calling {AtExit.run} method.
+  # exit by calling `AtExit.run` method.
  #
  # @note this option should be needed only because of `at_exit` ordering
  #   issues which may arise when running some of the testing frameworks.
@@ -125,9 +126,10 @@ module Concurrent
  # @note This method should *never* be called
  #   from within a gem. It should *only* be used from within the main
  #   application and even then it should be used only when necessary.
-  # @see AtExit
+  # @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841.
  #
  def self.disable_at_exit_handlers!
-    AtExit.enabled = false
+    deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841."
  end

  # Global thread pool optimized for short, fast *operations*.
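With the hunk above, `Concurrent.disable_at_exit_handlers!` becomes a no-op that only logs a deprecation warning on 1.1.6. A hedged usage sketch (not from the diff): callers can simply drop the call and shut pools down explicitly.

```ruby
require 'concurrent'

# On concurrent-ruby 1.1.6 this only logs a deprecation warning.
Concurrent.disable_at_exit_handlers!

pool = Concurrent::FixedThreadPool.new(2)
pool.post { 1 + 1 }
pool.shutdown                 # explicit shutdown remains the clean way out
pool.wait_for_termination(10)
```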
@@ -171,14 +173,16 @@ module Concurrent
      auto_terminate: opts.fetch(:auto_terminate, true),
      idletime: 60, # 1 minute
      max_queue: 0, # unlimited
-      fallback_policy: :abort # shouldn't matter -- 0 max queue
+      fallback_policy: :abort, # shouldn't matter -- 0 max queue
+      name: "fast"
    )
  end

  def self.new_io_executor(opts = {})
    CachedThreadPool.new(
      auto_terminate: opts.fetch(:auto_terminate, true),
-      fallback_policy: :abort # shouldn't matter -- 0 max queue
+      fallback_policy: :abort, # shouldn't matter -- 0 max queue
+      name: "io"
    )
  end
end
@@ -1,7 +1,7 @@
require 'concurrent/errors'
+require 'concurrent/concern/deprecation'
require 'concurrent/executor/executor_service'
require 'concurrent/synchronization'
-require 'concurrent/utility/at_exit'

module Concurrent

@@ -9,6 +9,7 @@ module Concurrent
  # @!visibility private
  class AbstractExecutorService < Synchronization::LockableObject
    include ExecutorService
+    include Concern::Deprecation

    # The set of possible fallback policies that may be set at thread pool creation.
    FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze
@@ -16,10 +17,20 @@ module Concurrent
    # @!macro executor_service_attr_reader_fallback_policy
    attr_reader :fallback_policy

+    attr_reader :name
+
    # Create a new thread pool.
-    def initialize(*args, &block)
+    def initialize(opts = {}, &block)
      super(&nil)
-      synchronize { ns_initialize(*args, &block) }
+      synchronize do
+        @auto_terminate = opts.fetch(:auto_terminate, true)
+        @name = opts.fetch(:name) if opts.key?(:name)
+        ns_initialize(opts, &block)
+      end
    end

+    def to_s
+      name ? "#{super[0..-2]} name: #{name}>" : super
+    end
+
    # @!macro executor_service_method_shutdown
@@ -54,12 +65,12 @@ module Concurrent

    # @!macro executor_service_method_auto_terminate_question
    def auto_terminate?
-      synchronize { ns_auto_terminate? }
+      synchronize { @auto_terminate }
    end

    # @!macro executor_service_method_auto_terminate_setter
    def auto_terminate=(value)
-      synchronize { self.ns_auto_terminate = value }
+      deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized."
    end

    private
@@ -110,25 +121,8 @@ module Concurrent
    end

    def ns_auto_terminate?
-      !!@auto_terminate
+      @auto_terminate
    end

-    def ns_auto_terminate=(value)
-      case value
-      when true
-        AtExit.add(self) { terminate_at_exit }
-        @auto_terminate = true
-      when false
-        AtExit.delete(self)
-        @auto_terminate = false
-      else
-        raise ArgumentError
-      end
-    end
-
-    def terminate_at_exit
-      kill # TODO be gentle first
-      wait_for_termination(10)
-    end
  end
end
@@ -52,10 +52,10 @@ module Concurrent
        super(opts)
        if Concurrent.on_jruby?
          @max_queue = 0
-          @executor = java.util.concurrent.Executors.newCachedThreadPool
+          @executor = java.util.concurrent.Executors.newCachedThreadPool(
+            DaemonThreadFactory.new(ns_auto_terminate?))
          @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new)
          @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS)
-          self.auto_terminate = opts.fetch(:auto_terminate, true)
        end
      end
    end
@@ -111,10 +111,10 @@ module Concurrent

  # @!macro executor_service_method_auto_terminate_setter
  #
  #
  #   Set the auto-terminate behavior for this executor.
  #
  #   @deprecated Has no effect
  #   @param [Boolean] value The new auto-terminate value to set for this executor.
  #
  #   @return [Boolean] `true` when auto-termination is enabled else `false`.

  ###################################################################
@@ -115,12 +115,13 @@ module Concurrent
  # Thread pools support several configuration options:
  #
  # * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
+  # * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and
+  #   a `<name>-worker-<id>` name is given to its threads if supported by used Ruby
+  #   implementation. `<id>` is uniq for each thread.
  # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
  #   any one time. When the queue size reaches `max_queue` and no new threads can be created,
  #   subsequent tasks will be rejected in accordance with the configured `fallback_policy`.
-  # * `auto_terminate`: When true (default) an `at_exit` handler will be registered which
-  #   will stop the thread pool when the application exits. See below for more information
-  #   on shutting down thread pools.
+  # * `auto_terminate`: When true (default), the threads started will be marked as daemon.
  # * `fallback_policy`: The policy defining how rejected tasks are handled.
  #
  # Three fallback policies are supported:
@@ -145,16 +146,12 @@ module Concurrent
  #
  # On some runtime platforms (most notably the JVM) the application will not
  # exit until all thread pools have been shutdown. To prevent applications from
-  # "hanging" on exit all thread pools include an `at_exit` handler that will
-  # stop the thread pool when the application exits. This handler uses a brute
-  # force method to stop the pool and makes no guarantees regarding resources being
-  # used by any tasks still running. Registration of this `at_exit` handler can be
-  # prevented by setting the thread pool's constructor `:auto_terminate` option to
-  # `false` when the thread pool is created. All thread pools support this option.
+  # "hanging" on exit, all threads can be marked as daemon according to the
+  # `:auto_terminate` option.
  #
  # ```ruby
-  # pool1 = Concurrent::FixedThreadPool.new(5) # an `at_exit` handler will be registered
-  # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # prevent `at_exit` handler registration
+  # pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon
+  # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon
  # ```
  #
  # @note Failure to properly shutdown a thread pool can lead to unpredictable results.
@@ -163,7 +160,7 @@ module Concurrent
  # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools
  # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class
  # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface
-  # @see http://ruby-doc.org//core-2.2.0/Kernel.html#method-i-at_exit Kernel#at_exit
+  # @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean-

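An illustrative sketch (not part of the diff) of the documented options, including the new `name:` option introduced in 1.1.6; the pool sizes and option values are arbitrary, and worker-thread naming only applies where the Ruby implementation supports `Thread#name=`.

```ruby
require 'concurrent'

pool = Concurrent::ThreadPoolExecutor.new(
  name:            "scraper",   # appears in #to_s and in worker thread names
  min_threads:     2,
  max_threads:     8,
  max_queue:       100,
  idletime:        60,
  fallback_policy: :caller_runs,
  auto_terminate:  true         # threads are not kept alive past process exit
)

pool.post { puts Thread.current.name } # e.g. "scraper-worker-1" on supported Rubies
pool.shutdown
pool.wait_for_termination(5)
```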
@@ -18,10 +18,6 @@ if Concurrent.on_jruby?
      }.freeze
      private_constant :FALLBACK_POLICY_CLASSES

-      def initialize(*args, &block)
-        super
-      end
-
      def post(*args, &task)
        raise ArgumentError.new('no block given') unless block_given?
        return handle_fallback(*args, &task) unless running?
@@ -42,7 +38,6 @@ if Concurrent.on_jruby?

      def shutdown
        synchronize do
-          self.ns_auto_terminate = false
          @executor.shutdown
          nil
        end
@@ -50,7 +45,6 @@ if Concurrent.on_jruby?

      def kill
        synchronize do
-          self.ns_auto_terminate = false
          @executor.shutdownNow
          nil
        end
@@ -87,5 +81,23 @@ if Concurrent.on_jruby?
      end
      private_constant :Job
    end
+
+    class DaemonThreadFactory
+      # hide include from YARD
+      send :include, java.util.concurrent.ThreadFactory
+
+      def initialize(daemonize = true)
+        @daemonize = daemonize
+      end
+
+      def newThread(runnable)
+        thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable)
+        thread.setDaemon(@daemonize)
+        return thread
+      end
+    end
+
+    private_constant :DaemonThreadFactory
+
  end
end
@@ -19,10 +19,11 @@ if Concurrent.on_jruby?
      private

      def ns_initialize(opts)
-        @executor = java.util.concurrent.Executors.newSingleThreadExecutor
+        @executor = java.util.concurrent.Executors.newSingleThreadExecutor(
+          DaemonThreadFactory.new(ns_auto_terminate?)
+        )
        @fallback_policy = opts.fetch(:fallback_policy, :discard)
        raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy)
-        self.auto_terminate = opts.fetch(:auto_terminate, true)
      end
    end
  end
@@ -114,10 +114,11 @@ if Concurrent.on_jruby?
          idletime,
          java.util.concurrent.TimeUnit::SECONDS,
          queue,
+          DaemonThreadFactory.new(ns_auto_terminate?),
          FALLBACK_POLICY_CLASSES[@fallback_policy].new)

-        self.auto_terminate = opts.fetch(:auto_terminate, true)
      end
    end

  end
end
@@ -27,7 +27,6 @@ module Concurrent
    def shutdown
      synchronize do
        break unless running?
-        self.ns_auto_terminate = false
        stop_event.set
        ns_shutdown_execution
      end
@@ -37,7 +36,6 @@ module Concurrent
    def kill
      synchronize do
        break if shutdown?
-        self.ns_auto_terminate = false
        stop_event.set
        ns_kill_execution
        stopped_event.set
@@ -15,7 +15,6 @@ module Concurrent
        max_queue: 0,
        idletime: DEFAULT_THREAD_IDLETIMEOUT,
        fallback_policy: opts.fetch(:fallback_policy, :discard),
-        auto_terminate: opts.fetch(:auto_terminate, true)
      )
    end
  end
@@ -122,8 +122,6 @@ module Concurrent
      raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
      raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length

-      self.auto_terminate = opts.fetch(:auto_terminate, true)
-
      @pool = []  # all workers
      @ready = [] # used as a stash (most idle worker is at the start)
      @queue = [] # used as queue
@@ -131,6 +129,7 @@ module Concurrent
      @scheduled_task_count = 0
      @completed_task_count = 0
      @largest_length = 0
+      @workers_counter = 0
      @ruby_pid = $$ # detects if Ruby has forked

      @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
@@ -224,7 +223,8 @@ module Concurrent
    def ns_add_busy_worker
      return if @pool.size >= @max_length

-      @pool << (worker = Worker.new(self))
+      @workers_counter += 1
+      @pool << (worker = Worker.new(self, @workers_counter))
      @largest_length = @pool.length if @pool.length > @largest_length
      worker
    end
@@ -284,6 +284,7 @@ module Concurrent
      @scheduled_task_count = 0
      @completed_task_count = 0
      @largest_length = 0
+      @workers_counter = 0
      @ruby_pid = $$
    end
  end
@@ -292,11 +293,15 @@ module Concurrent
    class Worker
      include Concern::Logging

-      def initialize(pool)
+      def initialize(pool, id)
        # instance variables accessed only under pool's lock so no need to sync here again
        @queue = Queue.new
        @pool = pool
        @thread = create_worker @queue, pool, pool.idletime
+
+        if @thread.respond_to?(:name=)
+          @thread.name = [pool.name, 'worker', id].compact.join('-')
+        end
      end

      def <<(message)
@@ -91,7 +91,7 @@ module Concurrent

    private

-    def ns_initialize
+    def ns_initialize(*args)
      @running = Concurrent::AtomicBoolean.new(true)
      @stopped = Concurrent::Event.new
      @count = Concurrent::AtomicFixnum.new(0)
@@ -1,3 +1,4 @@
+require 'concurrent/utility/engine'
require 'concurrent/executor/ruby_single_thread_executor'

module Concurrent
@@ -77,7 +77,6 @@ module Concurrent
      @timer_executor = SingleThreadExecutor.new
      @condition = Event.new
      @ruby_pid = $$ # detects if Ruby has forked
-      self.auto_terminate = opts.fetch(:auto_terminate, true)
    end

    # Post the task to the internal queue.
@@ -70,6 +70,14 @@ module Concurrent
      ns_select(&block)
    end

+    private
+
+    # @!visibility private
+    def initialize_copy(original)
+      super(original)
+      ns_initialize_copy
+    end
+
    # @!macro struct_new
    def self.new(*args, &block)
      clazz_name = nil
@@ -197,7 +197,7 @@ module Concurrent
  # Insert value into map with key if key is absent in one atomic step.
  # @param [Object] key
  # @param [Object] value
-  # @return [Object, nil] the value or nil when key was present
+  # @return [Object, nil] the previous value when key was present or nil when there was no key
  def put_if_absent(key, value)
    computed = false
    result = compute_if_absent(key) do
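A short usage sketch (not part of the diff) matching the corrected documentation above: `#put_if_absent` returns `nil` when it inserts, and the previous value when the key already exists.

```ruby
require 'concurrent'

map = Concurrent::Map.new
map.put_if_absent(:a, 1)  # => nil, :a was absent so 1 is stored
map.put_if_absent(:a, 2)  # => 1, key already present; 2 is ignored
map[:a]                   # => 1
```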
@@ -196,6 +196,16 @@ module Concurrent
      raise NameError.new("no member '#{member}' in struct")
    end

+    private
+
+    # @!visibility private
+    def initialize_copy(original)
+      synchronize do
+        super(original)
+        ns_initialize_copy
+      end
+    end
+
    # @!macro struct_new
    def self.new(*args, &block)
      clazz_name = nil
@@ -91,6 +91,16 @@ module Concurrent
      raise NameError.new("no member '#{member}' in struct")
    end

+    private
+
+    # @!visibility private
+    def initialize_copy(original)
+      synchronize do
+        super(original)
+        ns_initialize_copy
+      end
+    end
+
    # @!macro struct_new
    def self.new(*args, &block)
      clazz_name = nil
@@ -115,6 +115,17 @@ module Concurrent
      self.class.new(*self.to_h.merge(other, &block).values)
    end

+    # @!visibility private
+    def ns_initialize_copy
+      @values = @values.map do |val|
+        begin
+          val.clone
+        rescue TypeError
+          val
+        end
+      end
+    end
+
    # @!visibility private
    def pr_underscore(clazz)
      word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229
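A hedged sketch (not from the diff) of the behaviour the added `initialize_copy`/`ns_initialize_copy` methods aim at: duplicating a struct also clones its member values where possible, so the copy does not share mutable state with the original. `Point` below is an arbitrary example struct, and the expected output assumes members are cloned on copy.

```ruby
require 'concurrent'

Point = Concurrent::MutableStruct.new(:x, :y)

a = Point.new("1", "2")
b = a.dup
b.x << "0"   # mutate the copy's member string in place

# If member values are cloned during copy, the original is unaffected.
puts a.x     # expected "1" rather than "10"
```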
Some files were not shown because too many files have changed in this diff.