Merge pull request #13026 from Homebrew/dependabot/bundler/Library/Homebrew/concurrent-ruby-1.1.10

build(deps): bump concurrent-ruby from 1.1.9 to 1.1.10 in /Library/Homebrew
Mike McQuaid 2022-03-22 19:06:33 +00:00 committed by GitHub
commit 649a9357f9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
128 changed files with 318 additions and 311 deletions

View File

@@ -17,7 +17,7 @@ GEM
coderay (1.1.3)
commander (4.6.0)
highline (~> 2.0.0)
concurrent-ruby (1.1.9)
concurrent-ruby (1.1.10)
connection_pool (2.2.5)
did_you_mean (1.6.1)
diff-lcs (1.5.0)

View File

@@ -1,9 +1,9 @@
# typed: true
# DO NOT EDIT MANUALLY
# This is an autogenerated file for types exported from the `concurrent-ruby` gem.
# Please instead update this file by running `bin/tapioca gem concurrent-ruby`.
# typed: true
module Concurrent
extend ::Concurrent::Utility::EngineDetector
extend ::Concurrent::Utility::NativeExtensionLoader
@@ -21,7 +21,7 @@ module Concurrent
def dataflow_with(executor, *inputs, &block); end
def dataflow_with!(executor, *inputs, &block); end
def leave_transaction; end
def monotonic_time; end
def monotonic_time(unit = T.unsafe(nil)); end
class << self
def abort_transaction; end
@@ -42,7 +42,7 @@ module Concurrent
def global_logger=(value); end
def global_timer_set; end
def leave_transaction; end
def monotonic_time; end
def monotonic_time(unit = T.unsafe(nil)); end
def new_fast_executor(opts = T.unsafe(nil)); end
def new_io_executor(opts = T.unsafe(nil)); end
def physical_processor_count; end
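
For orientation on the signature change above: the regenerated RBI reflects that in concurrent-ruby 1.1.10 Concurrent.monotonic_time takes an optional unit argument, presumably forwarded to Process.clock_gettime. A minimal sketch of the assumed usage:

require 'concurrent'

# Default remains fractional seconds; passing a unit is assumed to change
# the return type the same way Process.clock_gettime does.
t_seconds = Concurrent.monotonic_time               # Float, seconds
t_millis  = Concurrent.monotonic_time(:millisecond) # Integer, if the unit is supported
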
@@ -89,7 +89,7 @@ class Concurrent::AbstractExecutorService < ::Concurrent::Synchronization::Locka
private
def handle_fallback(*args); end
def fallback_action(*args); end
def ns_auto_terminate?; end
def ns_execute(*args, &task); end
def ns_kill_execution; end
@@ -722,7 +722,6 @@ Concurrent::GLOBAL_FAST_EXECUTOR = T.let(T.unsafe(nil), Concurrent::Delay)
Concurrent::GLOBAL_IMMEDIATE_EXECUTOR = T.let(T.unsafe(nil), Concurrent::ImmediateExecutor)
Concurrent::GLOBAL_IO_EXECUTOR = T.let(T.unsafe(nil), Concurrent::Delay)
Concurrent::GLOBAL_LOGGER = T.let(T.unsafe(nil), Concurrent::AtomicReference)
Concurrent::GLOBAL_MONOTONIC_CLOCK = T.let(T.unsafe(nil), T.untyped)
Concurrent::GLOBAL_TIMER_SET = T.let(T.unsafe(nil), Concurrent::Delay)
class Concurrent::Hash < ::Hash; end
Concurrent::HashImplementation = Hash
@@ -1784,14 +1783,14 @@ class Concurrent::RubyThreadPoolExecutor < ::Concurrent::RubyExecutorService
def max_length; end
def max_queue; end
def min_length; end
def prune_pool; end
def queue_length; end
def ready_worker(worker); end
def ready_worker(worker, last_message); end
def remaining_capacity; end
def remove_busy_worker(worker); end
def scheduled_task_count; end
def synchronous; end
def worker_died(worker); end
def worker_not_old_enough(worker); end
def worker_task_completed; end
private
@@ -1804,12 +1803,11 @@ class Concurrent::RubyThreadPoolExecutor < ::Concurrent::RubyExecutorService
def ns_kill_execution; end
def ns_limited_queue?; end
def ns_prune_pool; end
def ns_ready_worker(worker, success = T.unsafe(nil)); end
def ns_ready_worker(worker, last_message, success = T.unsafe(nil)); end
def ns_remove_busy_worker(worker); end
def ns_reset_if_forked; end
def ns_shutdown_execution; end
def ns_worker_died(worker); end
def ns_worker_not_old_enough(worker); end
end
Concurrent::RubyThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE = T.let(T.unsafe(nil), Integer)
@@ -2216,11 +2214,9 @@ end
class Concurrent::TVar < ::Concurrent::Synchronization::Object
def initialize(value); end
def unsafe_increment_version; end
def unsafe_lock; end
def unsafe_value; end
def unsafe_value=(value); end
def unsafe_version; end
def value; end
def value=(value); end
@@ -2285,7 +2281,6 @@ class Concurrent::TimerTask < ::Concurrent::RubyExecutorService
def ns_kill_execution; end
def ns_shutdown_execution; end
def schedule_next_task(interval = T.unsafe(nil)); end
def timeout_task(completion); end
class << self
def execute(opts = T.unsafe(nil), &task); end
@@ -2300,9 +2295,9 @@ class Concurrent::Transaction
def abort; end
def commit; end
def open(tvar); end
def read(tvar); end
def unlock; end
def valid?; end
def write(tvar, value); end
class << self
@@ -2315,11 +2310,11 @@ Concurrent::Transaction::ABORTED = T.let(T.unsafe(nil), Object)
class Concurrent::Transaction::AbortError < ::StandardError; end
class Concurrent::Transaction::LeaveError < ::StandardError; end
class Concurrent::Transaction::ReadLogEntry < ::Struct
def tvar; end
def tvar=(_); end
def version; end
def version=(_); end
class Concurrent::Transaction::OpenEntry < ::Struct
def modified; end
def modified=(_); end
def value; end
def value=(_); end
class << self
def [](*_arg0); end

View File

@@ -3488,7 +3488,13 @@ end
Net::HTTPFatalErrorCode = Net::HTTPClientError
Net::HTTPInformationCode = Net::HTTPInformation
class Net::HTTPInformation
end
Net::HTTPInformationCode::EXCEPTION_TYPE = Net::HTTPError
class Net::HTTPInformation
end
class Net::HTTPLoopDetected
HAS_BODY = ::T.let(nil, ::T.untyped)
@@ -3548,7 +3554,13 @@ Net::HTTPServerErrorCode = Net::HTTPServerError
Net::HTTPSession = Net::HTTP
Net::HTTPSuccessCode = Net::HTTPSuccess
class Net::HTTPSuccess
end
Net::HTTPSuccessCode::EXCEPTION_TYPE = Net::HTTPError
class Net::HTTPSuccess
end
class Net::HTTPURITooLong
HAS_BODY = ::T.let(nil, ::T.untyped)

View File

@@ -3,7 +3,7 @@ require 'rbconfig'
ruby_engine = defined?(RUBY_ENGINE) ? RUBY_ENGINE : 'ruby'
ruby_version = RbConfig::CONFIG["ruby_version"]
path = File.expand_path('..', __FILE__)
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.9/lib/concurrent-ruby"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.10/lib/concurrent-ruby"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/i18n-1.10.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.15.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tzinfo-2.0.4/lib"

View File

@@ -0,0 +1,5 @@
# This file is here so that there is a file with the same name as the gem that
# can be required by Bundler.require. Applications should normally
# require 'concurrent'.
require_relative "concurrent"
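
The comment above explains the purpose of this new file: it lets Bundler.require load the gem by its name. A small sketch of the setup this enables (the Gemfile line is illustrative):

# Gemfile (illustrative):
#   gem 'concurrent-ruby'
require 'bundler/setup'
Bundler.require   # now loads concurrent-ruby.rb, which in turn requires 'concurrent'

# Explicitly requiring the library still works as before:
require 'concurrent'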

View File

@@ -272,6 +272,7 @@ module Concurrent
obj.send(:init_synchronization)
obj
end
ruby2_keywords :new if respond_to?(:ruby2_keywords, true)
end
private_constant :ClassMethods
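
The added ruby2_keywords call marks the generated .new so that keyword arguments survive being captured into *args and re-splatted into #initialize on Ruby 2.7 and later. A generic sketch of the pattern, not concurrent-ruby's internals:

class Factory
  class << self
    def build(*args)
      new(*args)   # forwards captured arguments to #initialize
    end
    # Without this flag, a trailing keywords hash would arrive as a
    # positional Hash on Ruby 3 and initialize(x:, y:) would fail.
    ruby2_keywords :build if respond_to?(:ruby2_keywords, true)
  end
end

class Point < Factory
  def initialize(x:, y:)
    @x, @y = x, y
  end
end

Point.build(x: 1, y: 2)   # keywords are preserved through the *args forwarding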

View File

@@ -170,6 +170,7 @@ module Concurrent
alias_method :compare_and_swap, :compare_and_set
alias_method :swap, :get_and_set
end
TruffleRubyAtomicReference
when Concurrent.on_rbx?
# @note Extends `Rubinius::AtomicReference` version adding aliases
# and numeric logic.

View File

@@ -19,7 +19,7 @@ module Concurrent
# t1 = Thread.new do
# puts "t1 is waiting"
# event.wait(1)
# puts "event ocurred"
# puts "event occurred"
# end
#
# t2 = Thread.new do
@@ -30,8 +30,8 @@ module Concurrent
# [t1, t2].each(&:join)
#
# # prints:
# # t2 calling set
# # t1 is waiting
# # t2 calling set
# # event occurred
class Event < Synchronization::LockableObject

View File

@@ -23,7 +23,14 @@ module Concurrent
synchronize do
try_acquire_timed(permits, nil)
nil
end
return unless block_given?
begin
yield
ensure
release(permits)
end
end
@@ -48,13 +55,22 @@ module Concurrent
Utility::NativeInteger.ensure_integer_and_bounds permits
Utility::NativeInteger.ensure_positive permits
synchronize do
acquired = synchronize do
if timeout.nil?
try_acquire_now(permits)
else
try_acquire_timed(permits, timeout)
end
end
return acquired unless block_given?
return unless acquired
begin
yield
ensure
release(permits)
end
end
# @!macro semaphore_method_release
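
Taken together, the hunks above add a block form to acquire and try_acquire. A usage sketch of the assumed 1.1.10 behavior:

require 'concurrent'

semaphore = Concurrent::Semaphore.new(2)

# Block form: the permit is released when the block finishes, even if it raises.
semaphore.acquire do
  # critical section holding one permit
end

# With a block, try_acquire only yields when the permits were obtained;
# it returns the block's value, or nil if acquisition timed out.
result = semaphore.try_acquire(1, 0.1) { :did_work }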

View File

@@ -267,12 +267,10 @@ module Concurrent
# running right now, AND no writers who came before us still waiting to
# acquire the lock
# Additionally, if any read locks have been taken, we must hold all of them
if c == held
# If we successfully swap the RUNNING_WRITER bit on, then we can go ahead
if @Counter.compare_and_set(c, c+RUNNING_WRITER)
@HeldCount.value = held + WRITE_LOCK_HELD
return true
end
if held > 0 && @Counter.compare_and_set(1, c+RUNNING_WRITER)
# If we are the only one reader and successfully swap the RUNNING_WRITER bit on, then we can go ahead
@HeldCount.value = held + WRITE_LOCK_HELD
return true
elsif @Counter.compare_and_set(c, c+WAITING_WRITER)
while true
# Now we have successfully incremented, so no more readers will be able to increment
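
The file names are not shown in this view, but this hunk appears to come from the reentrant read-write lock's write-acquisition path: the new condition lets a thread that is the only reader swap the RUNNING_WRITER bit on directly. A minimal usage sketch, assuming Concurrent::ReentrantReadWriteLock's public block API:

require 'concurrent'

lock = Concurrent::ReentrantReadWriteLock.new

# Upgrade case the change above concerns: a thread already holding a read
# lock acquires the write lock while it is the sole reader.
lock.with_read_lock do
  lock.with_write_lock do
    # exclusive access, still holding the read lock
  end
end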

View File

@@ -16,14 +16,16 @@ module Concurrent
# @!macro semaphore_method_acquire
#
# Acquires the given number of permits from this semaphore,
# blocking until all are available.
# blocking until all are available. If a block is given,
# yields to it and releases the permits afterwards.
#
# @param [Fixnum] permits Number of permits to acquire
#
# @raise [ArgumentError] if `permits` is not an integer or is less than
# one
#
# @return [nil]
# @return [nil, BasicObject] Without a block, `nil` is returned. If a block
# is given, its return value is returned.
# @!macro semaphore_method_available_permits
#
@@ -41,7 +43,9 @@ module Concurrent
#
# Acquires the given number of permits from this semaphore,
# only if all are available at the time of invocation or within
# `timeout` interval
# `timeout` interval. If a block is given, yields to it if the permits
# were successfully acquired, and releases them afterward, returning the
# block's return value.
#
# @param [Fixnum] permits the number of permits to acquire
#
@@ -51,8 +55,10 @@ module Concurrent
# @raise [ArgumentError] if `permits` is not an integer or is less than
# one
#
# @return [Boolean] `false` if no permits are available, `true` when
# acquired a permit
# @return [true, false, nil, BasicObject] `false` if no permits are
# available, `true` when acquired a permit. If a block is given, the
# block's return value is returned if the permits were acquired; if not,
# `nil` is returned.
# @!macro semaphore_method_release
#
@@ -106,6 +112,8 @@ module Concurrent
# releasing a blocking acquirer.
# However, no actual permit objects are used; the Semaphore just keeps a
# count of the number available and acts accordingly.
# Alternatively, permits may be acquired within a block, and automatically
# released after the block finishes executing.
#
# @!macro semaphore_public_api
# @example
@@ -140,6 +148,19 @@ module Concurrent
# # Thread 4 releasing semaphore
# # Thread 1 acquired semaphore
#
# @example
# semaphore = Concurrent::Semaphore.new(1)
#
# puts semaphore.available_permits
# semaphore.acquire do
# puts semaphore.available_permits
# end
# puts semaphore.available_permits
#
# # prints:
# # 1
# # 0
# # 1
class Semaphore < SemaphoreImplementation
end
end

View File

@@ -75,28 +75,31 @@ module Concurrent
private
# Handler which executes the `fallback_policy` once the queue size
# reaches `max_queue`.
# Returns an action which executes the `fallback_policy` once the queue
# size reaches `max_queue`. The reason for the indirection of an action
# is so that the work can be deferred outside of synchronization.
#
# @param [Array] args the arguments to the task which is being handled.
#
# @!visibility private
def handle_fallback(*args)
def fallback_action(*args)
case fallback_policy
when :abort
raise RejectedExecutionError
lambda { raise RejectedExecutionError }
when :discard
false
lambda { false }
when :caller_runs
begin
yield(*args)
rescue => ex
# let it fail
log DEBUG, ex
end
true
lambda {
begin
yield(*args)
rescue => ex
# let it fail
log DEBUG, ex
end
true
}
else
fail "Unknown fallback policy #{fallback_policy}"
lambda { fail "Unknown fallback policy #{fallback_policy}" }
end
end
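
For orientation, fallback_policy is the user-facing option this helper implements; the returned lambda is what lets executors run the policy after leaving their lock. A small sketch of the observable behavior (pool sizes are chosen only to force rejection):

require 'concurrent'

pool = Concurrent::ThreadPoolExecutor.new(
  min_threads: 1, max_threads: 1, max_queue: 1,
  fallback_policy: :caller_runs   # alternatives: :abort (raise), :discard (drop)
)

# Once the single worker and the one queue slot are busy, overflow tasks
# are handled by the policy; :caller_runs executes them on this thread.
5.times { |i| pool.post { sleep 0.1; i } }

pool.shutdown
pool.wait_for_termination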

View File

@@ -71,9 +71,16 @@ module Concurrent
# @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting
# new tasks. A value of -1 indicates that the queue may grow without bound.
# @!macro thread_pool_executor_method_prune_pool
# Prune the thread pool of unneeded threads
#
# What is being pruned is controlled by the min_threads and idletime
# parameters passed at pool creation time
#
# This is a no-op on some pool implementations (e.g. the Java one). The Ruby
# pool will auto-prune each time a new job is posted. You will need to call
# this method explicitly in case your application posts jobs in bursts (a
# lot of jobs and then nothing for long periods)
# @!macro thread_pool_executor_public_api
#
@@ -111,6 +118,9 @@ module Concurrent
#
# @!method can_overflow?
# @!macro executor_service_method_can_overflow_question
#
# @!method prune_pool
# @!macro thread_pool_executor_method_prune_pool
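
A usage sketch of the method documented above, using the standard ThreadPoolExecutor options named in the macro:

require 'concurrent'

pool = Concurrent::ThreadPoolExecutor.new(
  min_threads: 2,
  max_threads: 10,
  idletime:    60   # seconds a worker may idle before becoming prunable
)

100.times { |i| pool.post { i * i } }   # a burst grows the pool toward max_threads
# ... a long quiet period follows; with no new jobs, auto-pruning never runs ...

pool.prune_pool                          # explicitly drop workers idle past idletime
pool.length                              # trends back toward min_threads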

View File

@@ -20,7 +20,7 @@ if Concurrent.on_jruby?
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
return handle_fallback(*args, &task) unless running?
return fallback_action(*args, &task).call unless running?
@executor.submit Job.new(args, task)
true
rescue Java::JavaUtilConcurrent::RejectedExecutionException

View File

@@ -93,6 +93,10 @@ if Concurrent.on_jruby?
super && !@executor.isTerminating
end
# @!macro thread_pool_executor_method_prune_pool
def prune_pool
end
private
def ns_initialize(opts)

View File

@@ -16,10 +16,16 @@ module Concurrent
def post(*args, &task)
raise ArgumentError.new('no block given') unless block_given?
synchronize do
# If the executor is shut down, reject this task
return handle_fallback(*args, &task) unless running?
ns_execute(*args, &task)
deferred_action = synchronize {
if running?
ns_execute(*args, &task)
else
fallback_action(*args, &task)
end
}
if deferred_action
deferred_action.call
else
true
end
end

View File

@@ -93,13 +93,8 @@ module Concurrent
end
# @!visibility private
def ready_worker(worker)
synchronize { ns_ready_worker worker }
end
# @!visibility private
def worker_not_old_enough(worker)
synchronize { ns_worker_not_old_enough worker }
def ready_worker(worker, last_message)
synchronize { ns_ready_worker worker, last_message }
end
# @!visibility private
@@ -112,6 +107,11 @@ module Concurrent
synchronize { @completed_task_count += 1 }
end
# @!macro thread_pool_executor_method_prune_pool
def prune_pool
synchronize { ns_prune_pool }
end
private
# @!visibility private
@@ -156,10 +156,11 @@ module Concurrent
if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
@scheduled_task_count += 1
else
handle_fallback(*args, &task)
return fallback_action(*args, &task)
end
ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
nil
end
# @!visibility private
@@ -192,7 +193,7 @@ module Concurrent
# @!visibility private
def ns_assign_worker(*args, &task)
# keep growing if the pool is not at the minimum yet
worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
worker, _ = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
if worker
worker << [task, args]
true
@@ -223,7 +224,7 @@ module Concurrent
def ns_worker_died(worker)
ns_remove_busy_worker worker
replacement_worker = ns_add_busy_worker
ns_ready_worker replacement_worker, false if replacement_worker
ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker
end
# creates new worker which has to receive work to do after it's added
@@ -242,29 +243,21 @@ module Concurrent
# handle ready worker, giving it new job or assigning back to @ready
#
# @!visibility private
def ns_ready_worker(worker, success = true)
def ns_ready_worker(worker, last_message, success = true)
task_and_args = @queue.shift
if task_and_args
worker << task_and_args
else
# stop workers when !running?, do not return them to @ready
if running?
@ready.push(worker)
raise unless last_message
@ready.push([worker, last_message])
else
worker.stop
end
end
end
# returns back worker to @ready which was not idle for enough time
#
# @!visibility private
def ns_worker_not_old_enough(worker)
# let's put workers coming from idle_test back to the start (as the oldest worker)
@ready.unshift(worker)
true
end
# removes a worker which is not in not tracked in @ready
#
# @!visibility private
@@ -278,10 +271,17 @@ module Concurrent
#
# @!visibility private
def ns_prune_pool
return if @pool.size <= @min_length
last_used = @ready.shift
last_used << :idle_test if last_used
now = Concurrent.monotonic_time
stopped_workers = 0
while !@ready.empty? && (@pool.size - stopped_workers > @min_length)
worker, last_message = @ready.first
if now - last_message > self.idletime
stopped_workers += 1
@ready.shift
worker << :stop
else break
end
end
@next_gc_time = Concurrent.monotonic_time + @gc_interval
end
@@ -330,19 +330,10 @@ module Concurrent
def create_worker(queue, pool, idletime)
Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
last_message = Concurrent.monotonic_time
catch(:stop) do
loop do
case message = my_queue.pop
when :idle_test
if (Concurrent.monotonic_time - last_message) > my_idletime
my_pool.remove_busy_worker(self)
throw :stop
else
my_pool.worker_not_old_enough(self)
end
when :stop
my_pool.remove_busy_worker(self)
throw :stop
@@ -350,9 +341,7 @@ module Concurrent
else
task, args = message
run_task my_pool, task, args
last_message = Concurrent.monotonic_time
my_pool.ready_worker(self)
my_pool.ready_worker(self, Concurrent.monotonic_time)
end
end
end

View File

@@ -16,10 +16,10 @@ module Concurrent
# @return [Array]
def execute(*args)
synchronize do
success = false
value = reason = nil
success = true
value = reason = nil
synchronize do
begin
value = @task.call(*args)
success = true
@@ -27,9 +27,9 @@ module Concurrent
reason = ex
success = false
end
[success, value, reason]
end
[success, value, reason]
end
end
end
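
For reference, the class touched above wraps a task so that calling it never raises; a small usage sketch, assuming the public SafeTaskExecutor API is otherwise unchanged:

require 'concurrent'

task     = proc { |s| Integer(s) }               # may raise ArgumentError
executor = Concurrent::SafeTaskExecutor.new(task)

success, value, reason = executor.execute('42')    # => [true, 42, nil]
success, value, reason = executor.execute('oops')  # => [false, nil, #<ArgumentError>]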

View File

@@ -281,7 +281,6 @@ module Concurrent
each_pair { |k, v| return k if v == value }
nil
end unless method_defined?(:key)
alias_method :index, :key if RUBY_VERSION < '1.9'
# Is map empty?
# @return [true, false]

View File

@@ -58,29 +58,42 @@ module Concurrent
# @example Basic usage
#
# require 'concurrent'
# require 'thread' # for Queue
# require 'open-uri' # for open(uri)
# require 'csv'
# require 'open-uri'
#
# class Ticker
# def get_year_end_closing(symbol, year)
# uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m"
# data = open(uri) {|f| f.collect{|line| line.strip } }
# data[1].split(',')[4].to_f
# end
# def get_year_end_closing(symbol, year, api_key)
# uri = "https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol=#{symbol}&apikey=#{api_key}&datatype=csv"
# data = []
# csv = URI.parse(uri).read
# if csv.include?('call frequency')
# return :rate_limit_exceeded
# end
# CSV.parse(csv, headers: true) do |row|
# data << row['close'].to_f if row['timestamp'].include?(year.to_s)
# end
# year_end = data.first
# year_end
# rescue => e
# p e
# end
# end
#
# # Future
# price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) }
# price.state #=> :pending
# sleep(1) # do other stuff
# price.value #=> 63.65
# price.state #=> :fulfilled
# api_key = ENV['ALPHAVANTAGE_KEY']
# abort(error_message) unless api_key
#
# # ScheduledTask
# task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) }
# task.state #=> :pending
# sleep(3) # do other stuff
# task.value #=> 25.96
# # Future
# price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013, api_key) }
# price.state #=> :pending
# price.pending? #=> true
# price.value(0) #=> nil (does not block)
#
# sleep(1) # do other stuff
#
# price.value #=> 63.65 (after blocking if necessary)
# price.state #=> :fulfilled
# price.fulfilled? #=> true
# price.value #=> 63.65
#
# @example Successful task execution
#

View File

@@ -4,9 +4,7 @@ module Concurrent
# @!visibility private
# @!macro internal_implementation_note
LockableObjectImplementation = case
when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3)
MonitorLockableObject
when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3)
when Concurrent.on_cruby?
MutexLockableObject
when Concurrent.on_jruby?
JRubyLockableObject

Some files were not shown because too many files have changed in this diff.