Merge pull request #13026 from Homebrew/dependabot/bundler/Library/Homebrew/concurrent-ruby-1.1.10

build(deps): bump concurrent-ruby from 1.1.9 to 1.1.10 in /Library/Homebrew
Mike McQuaid 2022-03-22 19:06:33 +00:00 committed by GitHub
commit 649a9357f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
128 changed files with 318 additions and 311 deletions


@@ -17,7 +17,7 @@ GEM
     coderay (1.1.3)
     commander (4.6.0)
       highline (~> 2.0.0)
-    concurrent-ruby (1.1.9)
+    concurrent-ruby (1.1.10)
     connection_pool (2.2.5)
     did_you_mean (1.6.1)
     diff-lcs (1.5.0)


@@ -1,9 +1,9 @@
+# typed: true
 # DO NOT EDIT MANUALLY
 # This is an autogenerated file for types exported from the `concurrent-ruby` gem.
 # Please instead update this file by running `bin/tapioca gem concurrent-ruby`.
-# typed: true
 module Concurrent
   extend ::Concurrent::Utility::EngineDetector
   extend ::Concurrent::Utility::NativeExtensionLoader
@@ -21,7 +21,7 @@ module Concurrent
   def dataflow_with(executor, *inputs, &block); end
   def dataflow_with!(executor, *inputs, &block); end
   def leave_transaction; end
-  def monotonic_time; end
+  def monotonic_time(unit = T.unsafe(nil)); end

   class << self
     def abort_transaction; end
@@ -42,7 +42,7 @@ module Concurrent
     def global_logger=(value); end
     def global_timer_set; end
     def leave_transaction; end
-    def monotonic_time; end
+    def monotonic_time(unit = T.unsafe(nil)); end
     def new_fast_executor(opts = T.unsafe(nil)); end
     def new_io_executor(opts = T.unsafe(nil)); end
     def physical_processor_count; end
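The two hunks above pick up the optional `unit` argument that Concurrent.monotonic_time gains in 1.1.10, which the gem forwards to Process.clock_gettime(Process::CLOCK_MONOTONIC, unit). A minimal usage sketch, not part of the diff:

    require 'concurrent'

    Concurrent.monotonic_time               # elapsed seconds as a Float (default behaviour)
    Concurrent.monotonic_time(:millisecond) # integer milliseconds; any unit accepted by
                                            # Process.clock_gettime can be passed through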
@@ -89,7 +89,7 @@ class Concurrent::AbstractExecutorService < ::Concurrent::Synchronization::Locka
   private

-  def handle_fallback(*args); end
+  def fallback_action(*args); end
   def ns_auto_terminate?; end
   def ns_execute(*args, &task); end
   def ns_kill_execution; end
@@ -722,7 +722,6 @@ Concurrent::GLOBAL_FAST_EXECUTOR = T.let(T.unsafe(nil), Concurrent::Delay)
 Concurrent::GLOBAL_IMMEDIATE_EXECUTOR = T.let(T.unsafe(nil), Concurrent::ImmediateExecutor)
 Concurrent::GLOBAL_IO_EXECUTOR = T.let(T.unsafe(nil), Concurrent::Delay)
 Concurrent::GLOBAL_LOGGER = T.let(T.unsafe(nil), Concurrent::AtomicReference)
-Concurrent::GLOBAL_MONOTONIC_CLOCK = T.let(T.unsafe(nil), T.untyped)
 Concurrent::GLOBAL_TIMER_SET = T.let(T.unsafe(nil), Concurrent::Delay)
 class Concurrent::Hash < ::Hash; end
 Concurrent::HashImplementation = Hash
@@ -1784,14 +1783,14 @@ class Concurrent::RubyThreadPoolExecutor < ::Concurrent::RubyExecutorService
   def max_length; end
   def max_queue; end
   def min_length; end
+  def prune_pool; end
   def queue_length; end
-  def ready_worker(worker); end
+  def ready_worker(worker, last_message); end
   def remaining_capacity; end
   def remove_busy_worker(worker); end
   def scheduled_task_count; end
   def synchronous; end
   def worker_died(worker); end
-  def worker_not_old_enough(worker); end
   def worker_task_completed; end

   private
@@ -1804,12 +1803,11 @@ class Concurrent::RubyThreadPoolExecutor < ::Concurrent::RubyExecutorService
   def ns_kill_execution; end
   def ns_limited_queue?; end
   def ns_prune_pool; end
-  def ns_ready_worker(worker, success = T.unsafe(nil)); end
+  def ns_ready_worker(worker, last_message, success = T.unsafe(nil)); end
   def ns_remove_busy_worker(worker); end
   def ns_reset_if_forked; end
   def ns_shutdown_execution; end
   def ns_worker_died(worker); end
-  def ns_worker_not_old_enough(worker); end
 end

 Concurrent::RubyThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE = T.let(T.unsafe(nil), Integer)
@@ -2216,11 +2214,9 @@ end
 class Concurrent::TVar < ::Concurrent::Synchronization::Object
   def initialize(value); end
-  def unsafe_increment_version; end
   def unsafe_lock; end
   def unsafe_value; end
   def unsafe_value=(value); end
-  def unsafe_version; end
   def value; end
   def value=(value); end
@@ -2285,7 +2281,6 @@ class Concurrent::TimerTask < ::Concurrent::RubyExecutorService
   def ns_kill_execution; end
   def ns_shutdown_execution; end
   def schedule_next_task(interval = T.unsafe(nil)); end
-  def timeout_task(completion); end

   class << self
     def execute(opts = T.unsafe(nil), &task); end
@@ -2300,9 +2295,9 @@ class Concurrent::Transaction
   def abort; end
   def commit; end
+  def open(tvar); end
   def read(tvar); end
   def unlock; end
-  def valid?; end
   def write(tvar, value); end

   class << self
@@ -2315,11 +2310,11 @@ Concurrent::Transaction::ABORTED = T.let(T.unsafe(nil), Object)
 class Concurrent::Transaction::AbortError < ::StandardError; end
 class Concurrent::Transaction::LeaveError < ::StandardError; end

-class Concurrent::Transaction::ReadLogEntry < ::Struct
-  def tvar; end
-  def tvar=(_); end
-  def version; end
-  def version=(_); end
+class Concurrent::Transaction::OpenEntry < ::Struct
+  def modified; end
+  def modified=(_); end
+  def value; end
+  def value=(_); end

   class << self
     def [](*_arg0); end
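The TVar and Transaction entries above appear to track an internal rework of the gem's software transactional memory in 1.1.10 (ReadLogEntry becomes OpenEntry, the version-tracking methods go away). The public API is unchanged; a brief usage sketch, not taken from the diff:

    require 'concurrent'

    balance = Concurrent::TVar.new(100)
    fees    = Concurrent::TVar.new(0)

    # Both writes commit together or not at all.
    Concurrent.atomically do
      balance.value = balance.value - 10
      fees.value    = fees.value + 10
    end

    balance.value # => 90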


@@ -3488,7 +3488,13 @@ end
 Net::HTTPFatalErrorCode = Net::HTTPClientError

-Net::HTTPInformationCode = Net::HTTPInformation
+class Net::HTTPInformation
+end
+
+Net::HTTPInformationCode::EXCEPTION_TYPE = Net::HTTPError
+
+class Net::HTTPInformation
+end

 class Net::HTTPLoopDetected
   HAS_BODY = ::T.let(nil, ::T.untyped)
@@ -3548,7 +3554,13 @@ Net::HTTPServerErrorCode = Net::HTTPServerError
 Net::HTTPSession = Net::HTTP

-Net::HTTPSuccessCode = Net::HTTPSuccess
+class Net::HTTPSuccess
+end
+
+Net::HTTPSuccessCode::EXCEPTION_TYPE = Net::HTTPError
+
+class Net::HTTPSuccess
+end

 class Net::HTTPURITooLong
   HAS_BODY = ::T.let(nil, ::T.untyped)


@@ -3,7 +3,7 @@ require 'rbconfig'
 ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
 ruby_version = RbConfig::CONFIG["ruby_version"]
 path = File.expand_path('..', __FILE__)
-$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby"
+$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.10/lib/concurrent-ruby"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/i18n-1.10.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.15.0/lib"
 $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tzinfo-2.0.4/lib"


@@ -0,0 +1,5 @@
+# This file is here so that there is a file with the same name as the gem that
+# can be required by Bundler.require. Applications should normally
+# require 'concurrent'.
+
+require_relative "concurrent"


@@ -272,6 +272,7 @@ module Concurrent
         obj.send(:init_synchronization)
         obj
       end
+      ruby2_keywords :new if respond_to?(:ruby2_keywords, true)
     end
     private_constant :ClassMethods
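The `ruby2_keywords :new` flag added above keeps keyword arguments intact when `new` splats them through to `initialize` on Ruby 2.7+ and 3.x. A self-contained sketch of the same forwarding pattern, with illustrative names rather than the gem's code:

    class Forwarder
      def initialize(target)
        @target = target
      end

      # Forwards positional *and* keyword arguments to the wrapped callable.
      def call(*args, &block)
        @target.call(*args, &block)
      end
      ruby2_keywords :call if respond_to?(:ruby2_keywords, true)
    end

    Forwarder.new(->(a, b: 1) { a + b }).call(2, b: 3) # => 5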


@@ -170,6 +170,7 @@ module Concurrent
       alias_method :compare_and_swap, :compare_and_set
       alias_method :swap, :get_and_set
     end
+    TruffleRubyAtomicReference
   when Concurrent.on_rbx?
     # @note Extends `Rubinius::AtomicReference` version adding aliases
     #   and numeric logic.
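This hunk appears to make the TruffleRuby branch of the case expression actually evaluate to the TruffleRubyAtomicReference class. Whatever backing implementation is selected, the AtomicReference API looks the same; a brief sketch, not from the diff:

    require 'concurrent'

    ref = Concurrent::AtomicReference.new(:initial)
    ref.compare_and_set(:initial, :updated)  # => true, value swapped atomically
    ref.get                                  # => :updated
    ref.update { |current| "#{current}!" }   # applies the block atomically, retrying on contention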


@@ -19,7 +19,7 @@ module Concurrent
   #   t1 = Thread.new do
   #     puts "t1 is waiting"
   #     event.wait(1)
-  #     puts "event ocurred"
+  #     puts "event occurred"
   #   end
   #
   #   t2 = Thread.new do
@@ -30,8 +30,8 @@ module Concurrent
   #   [t1, t2].each(&:join)
   #
   #   # prints:
+  #   #   t2 calling set
   #   #   t1 is waiting
-  #   #   t2 calling set
   #   #   event occurred
   class Event < Synchronization::LockableObject


@@ -23,7 +23,14 @@ module Concurrent
       synchronize do
         try_acquire_timed(permits, nil)
-        nil
+      end
+
+      return unless block_given?
+
+      begin
+        yield
+      ensure
+        release(permits)
       end
     end
@@ -48,13 +55,22 @@ module Concurrent
       Utility::NativeInteger.ensure_integer_and_bounds permits
       Utility::NativeInteger.ensure_positive permits

-      synchronize do
+      acquired = synchronize do
         if timeout.nil?
           try_acquire_now(permits)
         else
           try_acquire_timed(permits, timeout)
         end
       end
+
+      return acquired unless block_given?
+      return unless acquired
+
+      begin
+        yield
+      ensure
+        release(permits)
+      end
     end

     # @!macro semaphore_method_release
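These two hunks add the block forms of Semaphore#acquire and #try_acquire in 1.1.10: when a block is given, the permits are released automatically after it runs. A small usage sketch, not part of the diff:

    require 'concurrent'

    semaphore = Concurrent::Semaphore.new(2)

    semaphore.acquire do
      # at most two threads run this section at once; the permit is
      # released when the block returns, even if it raises
    end

    # With a timeout: returns the block's value if acquired, nil otherwise.
    result = semaphore.try_acquire(1, 0.5) { :did_work }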


@@ -267,12 +267,10 @@ module Concurrent
         # running right now, AND no writers who came before us still waiting to
         # acquire the lock
         # Additionally, if any read locks have been taken, we must hold all of them
-        if c == held
-          # If we successfully swap the RUNNING_WRITER bit on, then we can go ahead
-          if @Counter.compare_and_set(c, c+RUNNING_WRITER)
-            @HeldCount.value = held + WRITE_LOCK_HELD
-            return true
-          end
+        if held > 0 && @Counter.compare_and_set(1, c+RUNNING_WRITER)
+          # If we are the only one reader and successfully swap the RUNNING_WRITER bit on, then we can go ahead
+          @HeldCount.value = held + WRITE_LOCK_HELD
+          return true
         elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
           while true
             # Now we have successfully incremented, so no more readers will be able to increment
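The hunk above adjusts how a thread that already holds read locks upgrades to the write lock in ReentrantReadWriteLock (per the new comment, only when it is the sole reader). From the caller's side the lock is used the same way; an illustrative sketch, not from the diff:

    require 'concurrent'

    lock  = Concurrent::ReentrantReadWriteLock.new
    cache = {}

    lock.with_write_lock { cache[:answer] = 42 }   # exclusive access
    lock.with_read_lock  { cache[:answer] }        # shared with other readers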


@@ -16,14 +16,16 @@ module Concurrent
   # @!macro semaphore_method_acquire
   #
   #   Acquires the given number of permits from this semaphore,
-  #   blocking until all are available.
+  #   blocking until all are available. If a block is given,
+  #   yields to it and releases the permits afterwards.
   #
   #   @param [Fixnum] permits Number of permits to acquire
   #
   #   @raise [ArgumentError] if `permits` is not an integer or is less than
   #     one
   #
-  #   @return [nil]
+  #   @return [nil, BasicObject] Without a block, `nil` is returned. If a block
+  #     is given, its return value is returned.

   # @!macro semaphore_method_available_permits
   #
@@ -41,7 +43,9 @@
   #
   #   Acquires the given number of permits from this semaphore,
   #   only if all are available at the time of invocation or within
-  #   `timeout` interval
+  #   `timeout` interval. If a block is given, yields to it if the permits
+  #   were successfully acquired, and releases them afterward, returning the
+  #   block's return value.
   #
   #   @param [Fixnum] permits the number of permits to acquire
   #
@@ -51,8 +55,10 @@
   #   @raise [ArgumentError] if `permits` is not an integer or is less than
   #     one
   #
-  #   @return [Boolean] `false` if no permits are available, `true` when
-  #     acquired a permit
+  #   @return [true, false, nil, BasicObject] `false` if no permits are
+  #     available, `true` when acquired a permit. If a block is given, the
+  #     block's return value is returned if the permits were acquired; if not,
+  #     `nil` is returned.

   # @!macro semaphore_method_release
   #
@@ -106,6 +112,8 @@
   #   releasing a blocking acquirer.
   #   However, no actual permit objects are used; the Semaphore just keeps a
   #   count of the number available and acts accordingly.
+  #   Alternatively, permits may be acquired within a block, and automatically
+  #   released after the block finishes executing.
   #
   # @!macro semaphore_public_api
   # @example
@@ -140,6 +148,19 @@
   #   # Thread 4 releasing semaphore
   #   # Thread 1 acquired semaphore
   #
+  # @example
+  #   semaphore = Concurrent::Semaphore.new(1)
+  #
+  #   puts semaphore.available_permits
+  #   semaphore.acquire do
+  #     puts semaphore.available_permits
+  #   end
+  #   puts semaphore.available_permits
+  #
+  #   # prints:
+  #   # 1
+  #   # 0
+  #   # 1
   class Semaphore < SemaphoreImplementation
   end
 end


@@ -75,28 +75,31 @@ module Concurrent
     private

-    # Handler which executes the `fallback_policy` once the queue size
-    # reaches `max_queue`.
+    # Returns an action which executes the `fallback_policy` once the queue
+    # size reaches `max_queue`. The reason for the indirection of an action
+    # is so that the work can be deferred outside of synchronization.
     #
     # @param [Array] args the arguments to the task which is being handled.
     #
     # @!visibility private
-    def handle_fallback(*args)
+    def fallback_action(*args)
       case fallback_policy
       when :abort
-        raise RejectedExecutionError
+        lambda { raise RejectedExecutionError }
       when :discard
-        false
+        lambda { false }
       when :caller_runs
-        begin
-          yield(*args)
-        rescue => ex
-          # let it fail
-          log DEBUG, ex
-        end
-        true
+        lambda {
+          begin
+            yield(*args)
+          rescue => ex
+            # let it fail
+            log DEBUG, ex
+          end
+          true
+        }
       else
-        fail "Unknown fallback policy #{fallback_policy}"
+        lambda { fail "Unknown fallback policy #{fallback_policy}" }
       end
     end
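Where handle_fallback executed the policy immediately, fallback_action returns a lambda so the fallback work can run outside the executor's internal lock. The policies themselves are unchanged; a usage sketch, not from the diff:

    require 'concurrent'

    pool = Concurrent::ThreadPoolExecutor.new(
      min_threads:     1,
      max_threads:     2,
      max_queue:       1,
      fallback_policy: :caller_runs   # alternatives: :abort (raise), :discard (drop silently)
    )

    # Once the queue is full, overflow tasks run synchronously on the posting thread.
    10.times { |i| pool.post { i * i } }
    pool.shutdown
    pool.wait_for_termination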


@@ -71,9 +71,16 @@ module Concurrent
   #     @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting
   #       new tasks. A value of -1 indicates that the queue may grow without bound.

+  # @!macro thread_pool_executor_method_prune_pool
+  #   Prune the thread pool of unneeded threads
+  #
+  #   What is being pruned is controlled by the min_threads and idletime
+  #   parameters passed at pool creation time
+  #
+  #   This is a no-op on some pool implementation (e.g. the Java one). The Ruby
+  #   pool will auto-prune each time a new job is posted. You will need to call
+  #   this method explicitely in case your application post jobs in bursts (a
+  #   lot of jobs and then nothing for long periods)

   # @!macro thread_pool_executor_public_api
   #
@@ -111,6 +118,9 @@
   #
   #   @!method can_overflow?
   #     @!macro executor_service_method_can_overflow_question
+  #
+  #   @!method prune_pool
+  #     @!macro thread_pool_executor_method_prune_pool
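As documented above, the Ruby pool prunes idle workers automatically when new jobs are posted, and the new prune_pool method exists for bursty workloads. A hedged sketch of explicit pruning (the numbers are arbitrary):

    require 'concurrent'

    pool = Concurrent::ThreadPoolExecutor.new(
      min_threads: 1,
      max_threads: 8,
      idletime:    60          # seconds a worker may sit idle before it can be pruned
    )

    1_000.times { pool.post { :burst_of_work } }

    # ...after a long quiet period, trim idle workers back toward min_threads:
    pool.prune_pool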


@@ -20,7 +20,7 @@ if Concurrent.on_jruby?
       def post(*args, &task)
         raise ArgumentError.new('no block given') unless block_given?
-        return handle_fallback(*args, &task) unless running?
+        return fallback_action(*args, &task).call unless running?
         @executor.submit Job.new(args, task)
         true
       rescue Java::JavaUtilConcurrent::RejectedExecutionException


@@ -93,6 +93,10 @@ if Concurrent.on_jruby?
         super && !@executor.isTerminating
       end

+      # @!macro thread_pool_executor_method_prune_pool
+      def prune_pool
+      end
+
       private

       def ns_initialize(opts)


@@ -16,10 +16,16 @@ module Concurrent
     def post(*args, &task)
       raise ArgumentError.new('no block given') unless block_given?
-      synchronize do
-        # If the executor is shut down, reject this task
-        return handle_fallback(*args, &task) unless running?
-        ns_execute(*args, &task)
+      deferred_action = synchronize {
+        if running?
+          ns_execute(*args, &task)
+        else
+          fallback_action(*args, &task)
+        end
+      }
+      if deferred_action
+        deferred_action.call
+      else
         true
       end
     end


@@ -93,13 +93,8 @@ module Concurrent
     end

     # @!visibility private
-    def ready_worker(worker)
-      synchronize { ns_ready_worker worker }
-    end
-
-    # @!visibility private
-    def worker_not_old_enough(worker)
-      synchronize { ns_worker_not_old_enough worker }
+    def ready_worker(worker, last_message)
+      synchronize { ns_ready_worker worker, last_message }
     end

     # @!visibility private
@@ -112,6 +107,11 @@ module Concurrent
       synchronize { @completed_task_count += 1 }
     end

+    # @!macro thread_pool_executor_method_prune_pool
+    def prune_pool
+      synchronize { ns_prune_pool }
+    end
+
     private

     # @!visibility private
@@ -156,10 +156,11 @@ module Concurrent
       if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
         @scheduled_task_count += 1
       else
-        handle_fallback(*args, &task)
+        return fallback_action(*args, &task)
       end

       ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
+      nil
     end

     # @!visibility private
@@ -192,7 +193,7 @@
     # @!visibility private
     def ns_assign_worker(*args, &task)
       # keep growing if the pool is not at the minimum yet
-      worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
+      worker, _ = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
       if worker
         worker << [task, args]
         true
@@ -223,7 +224,7 @@
     def ns_worker_died(worker)
       ns_remove_busy_worker worker
       replacement_worker = ns_add_busy_worker
-      ns_ready_worker replacement_worker, false if replacement_worker
+      ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker
     end

     # creates new worker which has to receive work to do after it's added
@@ -242,29 +243,21 @@
     # handle ready worker, giving it new job or assigning back to @ready
     #
     # @!visibility private
-    def ns_ready_worker(worker, success = true)
+    def ns_ready_worker(worker, last_message, success = true)
       task_and_args = @queue.shift
       if task_and_args
         worker << task_and_args
       else
         # stop workers when !running?, do not return them to @ready
         if running?
-          @ready.push(worker)
+          raise unless last_message
+          @ready.push([worker, last_message])
         else
           worker.stop
         end
       end
     end

-    # returns back worker to @ready which was not idle for enough time
-    #
-    # @!visibility private
-    def ns_worker_not_old_enough(worker)
-      # let's put workers coming from idle_test back to the start (as the oldest worker)
-      @ready.unshift(worker)
-      true
-    end
-
     # removes a worker which is not in not tracked in @ready
     #
     # @!visibility private
@@ -278,10 +271,17 @@
     #
     # @!visibility private
     def ns_prune_pool
-      return if @pool.size <= @min_length
-
-      last_used = @ready.shift
-      last_used << :idle_test if last_used
+      now = Concurrent.monotonic_time
+      stopped_workers = 0
+      while !@ready.empty? && (@pool.size - stopped_workers > @min_length)
+        worker, last_message = @ready.first
+        if now - last_message > self.idletime
+          stopped_workers += 1
+          @ready.shift
+          worker << :stop
+        else break
+        end
+      end

       @next_gc_time = Concurrent.monotonic_time + @gc_interval
     end
@@ -330,19 +330,10 @@
     def create_worker(queue, pool, idletime)
       Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
-        last_message = Concurrent.monotonic_time
         catch(:stop) do
           loop do

             case message = my_queue.pop
-            when :idle_test
-              if (Concurrent.monotonic_time - last_message) > my_idletime
-                my_pool.remove_busy_worker(self)
-                throw :stop
-              else
-                my_pool.worker_not_old_enough(self)
-              end
-
             when :stop
               my_pool.remove_busy_worker(self)
               throw :stop
@@ -350,9 +341,7 @@
             else
               task, args = message
               run_task my_pool, task, args
-              last_message = Concurrent.monotonic_time
-              my_pool.ready_worker(self)
+              my_pool.ready_worker(self, Concurrent.monotonic_time)
             end
           end
         end
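To make the new bookkeeping concrete: idle workers are now queued together with the monotonic timestamp of their last message, and ns_prune_pool stops them oldest-first while the pool stays above its minimum size. A stand-alone, simplified model of that rule (illustrative names only, not the gem's internals; each worker is modelled as a Queue mailbox):

    Entry = Struct.new(:mailbox, :last_message)

    def prune(ready, pool_size, min_length, idletime)
      now     = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      stopped = 0
      while !ready.empty? && (pool_size - stopped) > min_length
        entry = ready.first
        break unless now - entry.last_message > idletime
        ready.shift
        entry.mailbox << :stop   # the worker thread would pop :stop and exit
        stopped += 1
      end
      stopped
    end

    stale = Entry.new(Queue.new, Process.clock_gettime(Process::CLOCK_MONOTONIC) - 120)
    fresh = Entry.new(Queue.new, Process.clock_gettime(Process::CLOCK_MONOTONIC))
    prune([stale, fresh], 3, 1, 60)  # => 1; only the stale worker is told to stop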


@@ -16,10 +16,10 @@ module Concurrent
     # @return [Array]
     def execute(*args)
-      synchronize do
-        success = false
-        value = reason = nil
+      success = true
+      value = reason = nil

+      synchronize do
         begin
           value = @task.call(*args)
           success = true
@@ -27,9 +27,9 @@ module Concurrent
           reason = ex
           success = false
         end
-        [success, value, reason]
       end
+      [success, value, reason]
     end
   end
 end
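The change above builds the result tuple outside the lock while keeping the task call itself synchronized. SafeTaskExecutor is an internal helper, but its contract is easy to show; a sketch, not from the diff:

    require 'concurrent'

    ok, value, reason = Concurrent::SafeTaskExecutor.new(->(x) { x * 2 }).execute(21)
    # ok => true, value => 42, reason => nil

    ok, value, reason = Concurrent::SafeTaskExecutor.new(-> { raise 'boom' }).execute
    # ok => false, value => nil, reason => #<RuntimeError: boom>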


@@ -281,7 +281,6 @@ module Concurrent
       each_pair { |k, v| return k if v == value }
       nil
     end unless method_defined?(:key)
-    alias_method :index, :key if RUBY_VERSION < '1.9'

     # Is map empty?
     # @return [true, false]
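The removed line only aliased key as index on pre-1.9 Rubies; Concurrent::Map#key itself is unchanged. For reference (not part of the diff):

    require 'concurrent'

    map = Concurrent::Map.new
    map[:homebrew] = 'cask'
    map.key('cask')  # => :homebrew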


@@ -58,29 +58,42 @@ module Concurrent
   # @example Basic usage
   #
   #   require 'concurrent'
-  #   require 'thread'   # for Queue
-  #   require 'open-uri' # for open(uri)
+  #   require 'csv'
+  #   require 'open-uri'
   #
   #   class Ticker
-  #     def get_year_end_closing(symbol, year)
-  #       uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m"
-  #       data = open(uri) {|f| f.collect{|line| line.strip } }
-  #       data[1].split(',')[4].to_f
-  #     end
+  #     def get_year_end_closing(symbol, year, api_key)
+  #       uri = "https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol=#{symbol}&apikey=#{api_key}&datatype=csv"
+  #       data = []
+  #       csv = URI.parse(uri).read
+  #       if csv.include?('call frequency')
+  #         return :rate_limit_exceeded
+  #       end
+  #       CSV.parse(csv, headers: true) do |row|
+  #         data << row['close'].to_f if row['timestamp'].include?(year.to_s)
+  #       end
+  #       year_end = data.first
+  #       year_end
+  #     rescue => e
+  #       p e
+  #     end
   #   end
   #
-  #   # Future
-  #   price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) }
-  #   price.state #=> :pending
-  #   sleep(1)    # do other stuff
-  #   price.value #=> 63.65
-  #   price.state #=> :fulfilled
+  #   api_key = ENV['ALPHAVANTAGE_KEY']
+  #   abort(error_message) unless api_key
   #
-  #   # ScheduledTask
-  #   task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) }
-  #   task.state #=> :pending
-  #   sleep(3)    # do other stuff
-  #   task.value #=> 25.96
+  #   # Future
+  #   price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013, api_key) }
+  #   price.state #=> :pending
+  #   price.pending? #=> true
+  #   price.value(0) #=> nil (does not block)
+  #
+  #   sleep(1)    # do other stuff
+  #
+  #   price.value #=> 63.65 (after blocking if necessary)
+  #   price.state #=> :fulfilled
+  #   price.fulfilled? #=> true
+  #   price.value #=> 63.65
   #
   # @example Successful task execution
   #
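The rewritten doc example swaps the old Yahoo Finance endpoint for Alpha Vantage and demonstrates the non-blocking value(0) call. The Future lifecycle it illustrates can be exercised without any network access; a minimal sketch, not part of the diff:

    require 'concurrent'

    price = Concurrent::Future.execute { 21 * 3 }
    price.state      # typically :pending until the task finishes
    price.value(0)   # => nil, returns immediately without blocking
    price.value      # => 63, blocks until the future is fulfilled
    price.fulfilled? # => true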


@@ -4,9 +4,7 @@ module Concurrent
   # @!visibility private
   # @!macro internal_implementation_note
   LockableObjectImplementation = case
-                                 when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3)
-                                   MonitorLockableObject
-                                 when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3)
+                                 when Concurrent.on_cruby?
                                    MutexLockableObject
                                  when Concurrent.on_jruby?
                                    JRubyLockableObject

Some files were not shown because too many files have changed in this diff.