From 9347c0272ae181b747593bc3e8df5b122c6c41fb Mon Sep 17 00:00:00 2001 From: Mike McQuaid Date: Wed, 5 Dec 2018 16:32:24 +0000 Subject: [PATCH] Vendor concurrent-ruby 1.1.3. --- .../vendor/bundle-standalone/bundler/setup.rb | 4 +- .../lib/concurrent/map.rb | 240 -- .../truffle_lockable_object.rb | 9 - .../synchronization/truffle_object.rb | 31 - .../utility/native_extension_loader.rb | 73 - .../lib/concurrent-ruby.rb | 1 + .../concurrent-ruby-1.1.3/lib/concurrent.rb | 134 ++ .../lib/concurrent/agent.rb | 587 +++++ .../lib/concurrent/array.rb | 66 + .../lib/concurrent/async.rb | 445 ++++ .../lib/concurrent/atom.rb | 222 ++ .../atomic/abstract_thread_local_var.rb | 66 + .../lib/concurrent/atomic/atomic_boolean.rb | 126 ++ .../lib/concurrent/atomic/atomic_fixnum.rb | 143 ++ .../atomic/atomic_markable_reference.rb | 164 ++ .../lib/concurrent/atomic/atomic_reference.rb | 204 ++ .../lib/concurrent/atomic/count_down_latch.rb | 100 + .../lib/concurrent/atomic/cyclic_barrier.rb | 128 ++ .../lib/concurrent/atomic/event.rb | 109 + .../atomic/java_count_down_latch.rb | 42 + .../atomic/java_thread_local_var.rb | 37 + .../concurrent/atomic/mutex_atomic_boolean.rb | 62 + .../concurrent/atomic/mutex_atomic_fixnum.rb | 75 + .../atomic/mutex_count_down_latch.rb | 44 + .../lib/concurrent/atomic/mutex_semaphore.rb | 115 + .../lib/concurrent/atomic/read_write_lock.rb | 254 +++ .../atomic/reentrant_read_write_lock.rb | 379 ++++ .../atomic/ruby_thread_local_var.rb | 161 ++ .../lib/concurrent/atomic/semaphore.rb | 145 ++ .../lib/concurrent/atomic/thread_local_var.rb | 104 + .../atomic_reference/mutex_atomic.rb | 56 + .../atomic_reference/numeric_cas_wrapper.rb | 28 + .../lib/concurrent/atomics.rb | 10 + .../collection/copy_on_notify_observer_set.rb | 107 + .../collection/copy_on_write_observer_set.rb | 111 + .../java_non_concurrent_priority_queue.rb | 84 + .../concurrent/collection/lock_free_stack.rb | 158 ++ .../map/atomic_reference_map_backend.rb | 927 ++++++++ 
.../collection/map/mri_map_backend.rb | 0 .../map/non_concurrent_map_backend.rb | 3 +- .../map/synchronized_map_backend.rb | 82 + .../non_concurrent_priority_queue.rb | 143 ++ .../ruby_non_concurrent_priority_queue.rb | 150 ++ .../lib/concurrent/concern/deprecation.rb | 34 + .../lib/concurrent/concern/dereferenceable.rb | 73 + .../lib/concurrent/concern/logging.rb | 32 + .../lib/concurrent/concern/obligation.rb | 220 ++ .../lib/concurrent/concern/observable.rb | 110 + .../lib/concurrent/concurrent_ruby.jar | Bin 0 -> 137023 bytes .../lib/concurrent/configuration.rb | 184 ++ .../lib/concurrent/constants.rb | 2 +- .../lib/concurrent/dataflow.rb | 81 + .../lib/concurrent/delay.rb | 199 ++ .../lib/concurrent/errors.rb | 69 + .../lib/concurrent/exchanger.rb | 352 +++ .../executor/abstract_executor_service.rb | 134 ++ .../concurrent/executor/cached_thread_pool.rb | 62 + .../concurrent/executor/executor_service.rb | 185 ++ .../concurrent/executor/fixed_thread_pool.rb | 206 ++ .../concurrent/executor/immediate_executor.rb | 66 + .../executor/indirect_immediate_executor.rb | 44 + .../executor/java_executor_service.rb | 100 + .../executor/java_single_thread_executor.rb | 29 + .../executor/java_thread_pool_executor.rb | 123 ++ .../executor/ruby_executor_service.rb | 78 + .../executor/ruby_single_thread_executor.rb | 22 + .../executor/ruby_thread_pool_executor.rb | 362 +++ .../concurrent/executor/safe_task_executor.rb | 35 + .../executor/serial_executor_service.rb | 34 + .../executor/serialized_execution.rb | 107 + .../serialized_execution_delegator.rb | 28 + .../executor/simple_executor_service.rb | 100 + .../executor/single_thread_executor.rb | 56 + .../executor/thread_pool_executor.rb | 87 + .../lib/concurrent/executor/timer_set.rb | 175 ++ .../lib/concurrent/executors.rb | 20 + .../lib/concurrent/future.rb | 141 ++ .../lib/concurrent/hash.rb | 59 + .../lib/concurrent/immutable_struct.rb | 93 + .../lib/concurrent/ivar.rb | 207 ++ .../lib/concurrent/map.rb | 337 +++ 
.../lib/concurrent/maybe.rb | 229 ++ .../lib/concurrent/mutable_struct.rb | 229 ++ .../lib/concurrent/mvar.rb | 242 +++ .../lib/concurrent/options.rb | 42 + .../lib/concurrent/promise.rb | 579 +++++ .../lib/concurrent/promises.rb | 1936 +++++++++++++++++ .../lib/concurrent/re_include.rb | 58 + .../lib/concurrent/scheduled_task.rb | 318 +++ .../lib/concurrent/set.rb | 66 + .../lib/concurrent/settable_struct.rb | 129 ++ .../lib/concurrent/synchronization.rb | 9 +- .../abstract_lockable_object.rb | 10 +- .../synchronization/abstract_object.rb | 0 .../synchronization/abstract_struct.rb | 159 ++ .../concurrent/synchronization/condition.rb | 0 .../synchronization/jruby_lockable_object.rb | 0 .../synchronization/jruby_object.rb | 0 .../lib/concurrent/synchronization/lock.rb | 0 .../synchronization/lockable_object.rb | 12 +- .../concurrent/synchronization/mri_object.rb | 0 .../synchronization/mutex_lockable_object.rb} | 33 +- .../lib/concurrent/synchronization/object.rb | 12 +- .../synchronization/rbx_lockable_object.rb | 0 .../concurrent/synchronization/rbx_object.rb | 0 .../synchronization/truffleruby_object.rb | 46 + .../concurrent/synchronization/volatile.rb | 20 +- .../thread_safe/synchronized_delegator.rb | 50 + .../lib/concurrent/thread_safe/util.rb | 16 + .../lib/concurrent/thread_safe/util/adder.rb | 74 + .../thread_safe/util/cheap_lockable.rb | 118 + .../thread_safe/util/data_structures.rb | 63 + .../thread_safe/util/power_of_two_tuple.rb | 38 + .../concurrent/thread_safe/util/striped64.rb | 246 +++ .../concurrent/thread_safe/util/volatile.rb | 75 + .../thread_safe/util/xor_shift_random.rb | 50 + .../lib/concurrent/timer_task.rb | 334 +++ .../lib/concurrent/tuple.rb | 86 + .../lib/concurrent/tvar.rb | 258 +++ .../lib/concurrent/utility/at_exit.rb | 97 + .../lib/concurrent/utility/engine.rb | 8 +- .../lib/concurrent/utility/monotonic_time.rb | 58 + .../utility/native_extension_loader.rb | 79 + .../lib/concurrent/utility/native_integer.rb | 53 + 
.../concurrent/utility/processor_counter.rb | 158 ++ .../lib/concurrent/version.rb | 4 + 126 files changed, 15964 insertions(+), 405 deletions(-) delete mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/map.rb delete mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_lockable_object.rb delete mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_object.rb delete mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/utility/native_extension_loader.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent-ruby.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/agent.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/array.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/async.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atom.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/abstract_thread_local_var.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_boolean.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_fixnum.rb create mode 100644 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_markable_reference.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_reference.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/count_down_latch.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/cyclic_barrier.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/event.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_count_down_latch.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_thread_local_var.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_boolean.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_fixnum.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_count_down_latch.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_semaphore.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/read_write_lock.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/reentrant_read_write_lock.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/ruby_thread_local_var.rb create mode 
100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/semaphore.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/thread_local_var.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/mutex_atomic.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomics.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_notify_observer_set.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_write_observer_set.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/java_non_concurrent_priority_queue.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/lock_free_stack.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/atomic_reference_map_backend.rb rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/collection/map/mri_map_backend.rb (100%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/collection/map/non_concurrent_map_backend.rb (97%) create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/synchronized_map_backend.rb create mode 100644 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/non_concurrent_priority_queue.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/deprecation.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/dereferenceable.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/logging.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/obligation.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/observable.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concurrent_ruby.jar create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/configuration.rb rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/constants.rb (89%) create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/dataflow.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/delay.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/errors.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/exchanger.rb create mode 100644 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/abstract_executor_service.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/cached_thread_pool.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/executor_service.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/fixed_thread_pool.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/immediate_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/indirect_immediate_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_executor_service.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_single_thread_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_thread_pool_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/ruby_executor_service.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/ruby_single_thread_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/ruby_thread_pool_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/safe_task_executor.rb create mode 100644 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/serial_executor_service.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/serialized_execution.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/serialized_execution_delegator.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/simple_executor_service.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/single_thread_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/thread_pool_executor.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/timer_set.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executors.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/future.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/hash.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/immutable_struct.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/ivar.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/map.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/maybe.rb create mode 100644 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mutable_struct.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mvar.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/options.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promise.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promises.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/re_include.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/scheduled_task.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/set.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/settable_struct.rb rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization.rb (75%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/abstract_lockable_object.rb (89%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/abstract_object.rb (100%) create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_struct.rb rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/condition.rb (100%) rename 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/jruby_lockable_object.rb (100%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/jruby_object.rb (100%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/lock.rb (100%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/lockable_object.rb (87%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/mri_object.rb (100%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5/lib/concurrent/synchronization/mri_lockable_object.rb => concurrent-ruby-1.1.3/lib/concurrent/synchronization/mutex_lockable_object.rb} (56%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/object.rb (93%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/rbx_lockable_object.rb (100%) rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/rbx_object.rb (100%) create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/truffleruby_object.rb rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/synchronization/volatile.rb (53%) create mode 100644 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/synchronized_delegator.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/adder.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/cheap_lockable.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/data_structures.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/power_of_two_tuple.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/striped64.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/volatile.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/xor_shift_random.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/timer_task.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tuple.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tvar.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/at_exit.rb rename Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/{concurrent-ruby-1.0.5 => concurrent-ruby-1.1.3}/lib/concurrent/utility/engine.rb (84%) create mode 100644 
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/monotonic_time.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_extension_loader.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_integer.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/processor_counter.rb create mode 100644 Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/version.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/bundler/setup.rb b/Library/Homebrew/vendor/bundle-standalone/bundler/setup.rb index 7d69ff23d5..7aee97e471 100644 --- a/Library/Homebrew/vendor/bundle-standalone/bundler/setup.rb +++ b/Library/Homebrew/vendor/bundle-standalone/bundler/setup.rb @@ -3,7 +3,7 @@ require 'rbconfig' ruby_engine = defined?(RUBY_ENGINE) ? 
RUBY_ENGINE : 'ruby' ruby_version = RbConfig::CONFIG["ruby_version"] path = File.expand_path('..', __FILE__) -$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.0.5/lib" +$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.3/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/i18n-1.1.1/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.11.3/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thread_safe-0.3.6/lib" @@ -21,6 +21,6 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/powerpack-0.1.2/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rainbow-3.0.0/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-progressbar-1.10.0/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unicode-display_width-1.4.0/lib" -$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-0.60.0/lib" +$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-0.61.0/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-rspec-1.30.1/lib" $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-macho-2.1.0/lib" diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/map.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/map.rb deleted file mode 100644 index 814deb0085..0000000000 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/map.rb +++ /dev/null @@ -1,240 +0,0 @@ -require 'thread' -require 'concurrent/constants' -require 'concurrent/synchronization' - -module Concurrent - # @!visibility private - module Collection - - # @!visibility private - MapImplementation = if Concurrent.java_extensions_loaded? 
- # noinspection RubyResolve - JRubyMapBackend - elsif defined?(RUBY_ENGINE) - case RUBY_ENGINE - when 'ruby' - require 'concurrent/collection/map/mri_map_backend' - MriMapBackend - when 'rbx' - require 'concurrent/collection/map/atomic_reference_map_backend' - AtomicReferenceMapBackend - when 'jruby+truffle' - require 'concurrent/collection/map/atomic_reference_map_backend' - AtomicReferenceMapBackend - else - warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' if $VERBOSE - require 'concurrent/collection/map/synchronized_map_backend' - SynchronizedMapBackend - end - else - MriMapBackend - end - end - - # `Concurrent::Map` is a hash-like object and should have much better performance - # characteristics, especially under high concurrency, than `Concurrent::Hash`. - # However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash` - # -- for instance, it does not necessarily retain ordering by insertion time as `Hash` - # does. For most uses it should do fine though, and we recommend you consider - # `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs. - # - # > require 'concurrent' - # > - # > map = Concurrent::Map.new - class Map < Collection::MapImplementation - - # @!macro [new] map_method_is_atomic - # This method is atomic. Atomic methods of `Map` which accept a block - # do not allow the `self` instance to be used within the block. Doing - # so will cause a deadlock. 
- - # @!method put_if_absent - # @!macro map_method_is_atomic - - # @!method compute_if_absent - # @!macro map_method_is_atomic - - # @!method compute_if_present - # @!macro map_method_is_atomic - - # @!method compute - # @!macro map_method_is_atomic - - # @!method merge_pair - # @!macro map_method_is_atomic - - # @!method replace_pair - # @!macro map_method_is_atomic - - # @!method replace_if_exists - # @!macro map_method_is_atomic - - # @!method get_and_set - # @!macro map_method_is_atomic - - # @!method delete - # @!macro map_method_is_atomic - - # @!method delete_pair - # @!macro map_method_is_atomic - - def initialize(options = nil, &block) - if options.kind_of?(::Hash) - validate_options_hash!(options) - else - options = nil - end - - super(options) - @default_proc = block - end - - def [](key) - if value = super # non-falsy value is an existing mapping, return it right away - value - # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call - # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value - # would be returned) - # note: nil == value check is not technically necessary - elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) - @default_proc.call(self, key) - else - value - end - end - - alias_method :get, :[] - alias_method :put, :[]= - - # @!macro [attach] map_method_not_atomic - # The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended - # to be use as a concurrency primitive with strong happens-before - # guarantees. It is not intended to be used as a high-level abstraction - # supporting complex operations. 
All read and write operations are - # thread safe, but no guarantees are made regarding race conditions - # between the fetch operation and yielding to the block. Additionally, - # this method does not support recursion. This is due to internal - # constraints that are very unlikely to change in the near future. - def fetch(key, default_value = NULL) - if NULL != (value = get_or_default(key, NULL)) - value - elsif block_given? - yield key - elsif NULL != default_value - default_value - else - raise_fetch_no_key - end - end - - # @!macro map_method_not_atomic - def fetch_or_store(key, default_value = NULL) - fetch(key) do - put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) - end - end - - # @!macro map_method_is_atomic - def put_if_absent(key, value) - computed = false - result = compute_if_absent(key) do - computed = true - value - end - computed ? nil : result - end unless method_defined?(:put_if_absent) - - def value?(value) - each_value do |v| - return true if value.equal?(v) - end - false - end - - def keys - arr = [] - each_pair {|k, v| arr << k} - arr - end unless method_defined?(:keys) - - def values - arr = [] - each_pair {|k, v| arr << v} - arr - end unless method_defined?(:values) - - def each_key - each_pair {|k, v| yield k} - end unless method_defined?(:each_key) - - def each_value - each_pair {|k, v| yield v} - end unless method_defined?(:each_value) - - alias_method :each, :each_pair unless method_defined?(:each) - - def key(value) - each_pair {|k, v| return k if v == value} - nil - end unless method_defined?(:key) - alias_method :index, :key if RUBY_VERSION < '1.9' - - def empty? - each_pair {|k, v| return false} - true - end unless method_defined?(:empty?) 
- - def size - count = 0 - each_pair {|k, v| count += 1} - count - end unless method_defined?(:size) - - def marshal_dump - raise TypeError, "can't dump hash with default proc" if @default_proc - h = {} - each_pair {|k, v| h[k] = v} - h - end - - def marshal_load(hash) - initialize - populate_from(hash) - end - - undef :freeze - - # @!visibility private - DEFAULT_OBJ_ID_STR_WIDTH = 0.size == 4 ? 7 : 14 # we want to look "native", 7 for 32-bit, 14 for 64-bit - # override default #inspect() method: firstly, we don't want to be spilling our guts (i-vars), secondly, MRI backend's - # #inspect() call on its @backend i-var will bump @backend's iter level while possibly yielding GVL - def inspect - id_str = (object_id << 1).to_s(16).rjust(DEFAULT_OBJ_ID_STR_WIDTH, '0') - "#<#{self.class.name}:0x#{id_str} entries=#{size} default_proc=#{@default_proc.inspect}>" - end - - private - def raise_fetch_no_key - raise KeyError, 'key not found' - end - - def initialize_copy(other) - super - populate_from(other) - end - - def populate_from(hash) - hash.each_pair {|k, v| self[k] = v} - self - end - - def validate_options_hash!(options) - if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0) - raise ArgumentError, ":initial_capacity must be a positive Integer" - end - if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) - raise ArgumentError, ":load_factor must be a number between 0 and 1" - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_lockable_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_lockable_object.rb deleted file mode 100644 index c9328f21f8..0000000000 --- 
a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_lockable_object.rb +++ /dev/null @@ -1,9 +0,0 @@ -module Concurrent - module Synchronization - class TruffleLockableObject < AbstractLockableObject - def new(*) - raise NotImplementedError - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_object.rb deleted file mode 100644 index 9b1c3fc76e..0000000000 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/truffle_object.rb +++ /dev/null @@ -1,31 +0,0 @@ -module Concurrent - module Synchronization - - module TruffleAttrVolatile - def self.included(base) - base.extend(ClassMethods) - end - - module ClassMethods - def attr_volatile(*names) - # TODO may not always be available - attr_atomic(*names) - end - end - - def full_memory_barrier - Truffle::System.full_memory_barrier - end - end - - # @!visibility private - # @!macro internal_implementation_note - class TruffleObject < AbstractObject - include TruffleAttrVolatile - - def initialize - # nothing to do - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/utility/native_extension_loader.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/utility/native_extension_loader.rb deleted file mode 100644 index f2fcd8c5a9..0000000000 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/utility/native_extension_loader.rb +++ /dev/null @@ -1,73 +0,0 @@ -require 'concurrent/utility/engine' - -module Concurrent - - module Utility - - # @!visibility private - module NativeExtensionLoader - - def 
allow_c_extensions? - Concurrent.on_cruby? - end - - def c_extensions_loaded? - @c_extensions_loaded ||= false - end - - def java_extensions_loaded? - @java_extensions_loaded ||= false - end - - def set_c_extensions_loaded - @c_extensions_loaded = true - end - - def set_java_extensions_loaded - @java_extensions_loaded = true - end - - def load_native_extensions - unless defined? Synchronization::AbstractObject - raise 'native_extension_loader loaded before Synchronization::AbstractObject' - end - - if Concurrent.on_cruby? && !c_extensions_loaded? - tries = [ - lambda do - require 'concurrent/extension' - set_c_extensions_loaded - end, - lambda do - # may be a Windows cross-compiled native gem - require "concurrent/#{RUBY_VERSION[0..2]}/extension" - set_c_extensions_loaded - end] - - tries.each do |try| - begin - try.call - break - rescue LoadError - next - end - end - end - - if Concurrent.on_jruby? && !java_extensions_loaded? - begin - require 'concurrent_ruby_ext' - set_java_extensions_loaded - rescue LoadError - # move on with pure-Ruby implementations - raise 'On JRuby but Java extensions failed to load.' 
- end - end - end - end - end - - # @!visibility private - extend Utility::NativeExtensionLoader -end - diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent-ruby.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent-ruby.rb new file mode 100644 index 0000000000..08917e3bb7 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent-ruby.rb @@ -0,0 +1 @@ +require_relative "./concurrent" diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent.rb new file mode 100644 index 0000000000..87de46f1b8 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent.rb @@ -0,0 +1,134 @@ +require 'concurrent/version' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' + +require 'concurrent/atomics' +require 'concurrent/executors' +require 'concurrent/synchronization' + +require 'concurrent/atomic/atomic_markable_reference' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/agent' +require 'concurrent/atom' +require 'concurrent/array' +require 'concurrent/hash' +require 'concurrent/set' +require 'concurrent/map' +require 'concurrent/tuple' +require 'concurrent/async' +require 'concurrent/dataflow' +require 'concurrent/delay' +require 'concurrent/exchanger' +require 'concurrent/future' +require 'concurrent/immutable_struct' +require 'concurrent/ivar' +require 'concurrent/maybe' +require 'concurrent/mutable_struct' +require 'concurrent/mvar' +require 'concurrent/promise' +require 'concurrent/scheduled_task' +require 'concurrent/settable_struct' +require 'concurrent/timer_task' +require 'concurrent/tvar' +require 'concurrent/promises' + +require 
'concurrent/thread_safe/synchronized_delegator' +require 'concurrent/thread_safe/util' + +require 'concurrent/options' + +# @!macro internal_implementation_note +# +# @note **Private Implementation:** This abstraction is a private, internal +# implementation detail. It should never be used directly. + +# @!macro monotonic_clock_warning +# +# @note Time calculations on all platforms and languages are sensitive to +# changes to the system clock. To alleviate the potential problems +# associated with changing the system clock while an application is running, +# most modern operating systems provide a monotonic clock that operates +# independently of the system clock. A monotonic clock cannot be used to +# determine human-friendly clock times. A monotonic clock is used exclusively +# for calculating time intervals. Not all Ruby platforms provide access to an +# operating system monotonic clock. On these platforms a pure-Ruby monotonic +# clock will be used as a fallback. An operating system monotonic clock is both +# faster and more reliable than the pure-Ruby implementation. The pure-Ruby +# implementation should be fast and reliable enough for most non-realtime +# operations. At this time the common Ruby platforms that provide access to an +# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions). +# +# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3) + +# @!macro copy_options +# +# ## Copy Options +# +# Object references in Ruby are mutable. This can lead to serious +# problems when the {#value} of an object is a mutable reference. Which +# is always the case unless the value is a `Fixnum`, `Symbol`, or similar +# "primitive" data type. Each instance can be configured with a few +# options that can help protect the program from potentially dangerous +# operations. 
Each of these options can be optionally set when the object +# instance is created: +# +# * `:dup_on_deref` When true the object will call the `#dup` method on +# the `value` object every time the `#value` method is called +# (default: false) +# * `:freeze_on_deref` When true the object will call the `#freeze` +# method on the `value` object every time the `#value` method is called +# (default: false) +# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run +# every time the `#value` method is called. The `Proc` will be given +# the current `value` as its only argument and the result returned by +# the block will be the return value of the `#value` call. When `nil` +# this option will be ignored (default: nil) +# +# When multiple deref options are set the order of operations is strictly defined. +# The order of deref operations is: +# * `:copy_on_deref` +# * `:dup_on_deref` +# * `:freeze_on_deref` +# +# Because of this ordering there is no need to `#freeze` an object created by a +# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`. +# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is +# as close to the behavior of a "pure" functional language (like Erlang, Clojure, +# or Haskell) as we are likely to get in Ruby. + +# @!macro deref_options +# +# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before +# returning the data from {#value} +# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before +# returning the data from {#value} +# @option opts [Proc] :copy_on_deref (nil) When calling the {#value} +# method, call the given proc passing the internal value as the sole +# argument then return the new value returned from the proc. 
+ +# @!macro executor_and_deref_options +# +# @param [Hash] opts the options used to define the behavior at update and deref +# and to specify the executor on which to perform actions +# @option opts [Executor] :executor when set use the given `Executor` instance. +# Three special values are also supported: `:io` returns the global pool for +# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast +# operations, and `:immediate` returns the global `ImmediateExecutor` object. +# @!macro deref_options + +# @!macro warn.edge +# @api Edge +# @note **Edge Features** are under active development and may change frequently. +# +# - Deprecations are not added before incompatible changes. +# - Edge version: _major_ is always 0, _minor_ bump means incompatible change, +# _patch_ bump means compatible change. +# - Edge features may also lack tests and documentation. +# - Features developed in `concurrent-ruby-edge` are expected to move +# to `concurrent-ruby` when finalised. + + +# {include:file:README.md} +module Concurrent +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/agent.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/agent.rb new file mode 100644 index 0000000000..815dca008c --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/agent.rb @@ -0,0 +1,587 @@ +require 'concurrent/configuration' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/thread_local_var' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # `Agent` is inspired by Clojure's [agent](http://clojure.org/agents) + # function. An agent is a shared, mutable variable providing independent, + # uncoordinated, *asynchronous* change of individual values. 
Best used when + # the value will undergo frequent, complex updates. Suitable when the result + # of an update does not need to be known immediately. `Agent` is (mostly) + # functionally equivalent to Clojure's agent, except where the runtime + # prevents parity. + # + # Agents are reactive, not autonomous - there is no imperative message loop + # and no blocking receive. The state of an Agent should be itself immutable + # and the `#value` of an Agent is always immediately available for reading by + # any thread without any messages, i.e. observation does not require + # cooperation or coordination. + # + # Agent action dispatches are made using the various `#send` methods. These + # methods always return immediately. At some point later, in another thread, + # the following will happen: + # + # 1. The given `action` will be applied to the state of the Agent and the + # `args`, if any were supplied. + # 2. The return value of `action` will be passed to the validator lambda, + # if one has been set on the Agent. + # 3. If the validator succeeds or if no validator was given, the return value + # of the given `action` will become the new `#value` of the Agent. See + # `#initialize` for details. + # 4. If any observers were added to the Agent, they will be notified. See + # `#add_observer` for details. + # 5. If during the `action` execution any other dispatches are made (directly + # or indirectly), they will be held until after the `#value` of the Agent + # has been changed. + # + # If any exceptions are thrown by an action function, no nested dispatches + # will occur, and the exception will be cached in the Agent itself. When an + # Agent has errors cached, any subsequent interactions will immediately throw + # an exception, until the agent's errors are cleared. Agent errors can be + # examined with `#error` and the agent restarted with `#restart`. + # + # The actions of all Agents get interleaved amongst threads in a thread pool. 
+ # At any point in time, at most one action for each Agent is being executed. + # Actions dispatched to an agent from another single agent or thread will + # occur in the order they were sent, potentially interleaved with actions + # dispatched to the same agent from other sources. The `#send` method should + # be used for actions that are CPU limited, while the `#send_off` method is + # appropriate for actions that may block on IO. + # + # Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an agent with an initial value + # agent = Concurrent::Agent.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # agent.send{|set| next_fibonacci(set) } + # end + # + # # wait for them to complete + # agent.await + # + # # get the current value + # agent.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Agents support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time an action dispatch returns and + # the new value is successfully validated. Observation will *not* occur if the + # action raises an exception, if validation fails, or when a {#restart} occurs. + # + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Agent when the action began + # processing. The `new_value` is the value to which the Agent was set when the + # action completed. Note that `old_value` and `new_value` may be the same. + # This is not an error. It simply means that the action returned the same + # value. + # + # ## Nested Actions + # + # It is possible for an Agent action to post further actions back to itself. 
+ # The nested actions will be enqueued normally then processed *after* the + # outer action completes, in the order they were sent, possibly interleaved + # with action dispatches from other threads. Nested actions never deadlock + # with one another and a failure in a nested action will never affect the + # outer action. + # + # Nested actions can be called using the Agent reference from the enclosing + # scope or by passing the reference in as a "send" argument. Nested actions + # cannot be post using `self` from within the action block/proc/lambda; `self` + # in this context will not reference the Agent. The preferred method for + # dispatching nested actions is to pass the Agent as an argument. This allows + # Ruby to more effectively manage the closing scope. + # + # Prefer this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send(agent) do |value, this| + # this.send {|v| v + 42 } + # 3.14 + # end + # agent.value #=> 45.14 + # ``` + # + # Over this: + # + # ``` + # agent = Concurrent::Agent.new(0) + # agent.send do |value| + # agent.send {|v| v + 42 } + # 3.14 + # end + # ``` + # + # @!macro agent_await_warning + # + # **NOTE** Never, *under any circumstances*, call any of the "await" methods + # ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action + # block/proc/lambda. The call will block the Agent and will always fail. + # Calling either {#await} or {#wait} (with a timeout of `nil`) will + # hopelessly deadlock the Agent with no possibility of recovery. 
+ # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/Agents Clojure Agents + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Agent < Synchronization::LockableObject + include Concern::Observable + + ERROR_MODES = [:continue, :fail].freeze + private_constant :ERROR_MODES + + AWAIT_FLAG = ::Object.new + private_constant :AWAIT_FLAG + + AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG } + private_constant :AWAIT_ACTION + + DEFAULT_ERROR_HANDLER = ->(agent, error) { nil } + private_constant :DEFAULT_ERROR_HANDLER + + DEFAULT_VALIDATOR = ->(value) { true } + private_constant :DEFAULT_VALIDATOR + + Job = Struct.new(:action, :args, :executor, :caller) + private_constant :Job + + # Raised during action processing or any other time in an Agent's lifecycle. + class Error < StandardError + def initialize(message = nil) + message ||= 'agent must be restarted before jobs can post' + super(message) + end + end + + # Raised when a new value obtained during action processing or at `#restart` + # fails validation. + class ValidationError < Error + def initialize(message = nil) + message ||= 'invalid value' + super(message) + end + end + + # The error mode this Agent is operating in. See {#initialize} for details. + attr_reader :error_mode + + # Create a new `Agent` with the given initial value and options. + # + # The `:validator` option must be `nil` or a side-effect free proc/lambda + # which takes one argument. On any intended value change the validator, if + # provided, will be called. If the new value is invalid the validator should + # return `false` or raise an error. + # + # The `:error_handler` option must be `nil` or a proc/lambda which takes two + # arguments. When an action raises an error or validation fails, either by + # returning false or raising an error, the error handler will be called. 
The + # arguments to the error handler will be a reference to the agent itself and + # the error object which was raised. + # + # The `:error_mode` may be either `:continue` (the default if an error + # handler is given) or `:fail` (the default if error handler nil or not + # given). + # + # If an action being run by the agent throws an error or doesn't pass + # validation the error handler, if present, will be called. After the + # handler executes if the error mode is `:continue` the Agent will continue + # as if neither the action that caused the error nor the error itself ever + # happened. + # + # If the mode is `:fail` the Agent will become {#failed?} and will stop + # accepting new action dispatches. Any previously queued actions will be + # held until {#restart} is called. The {#value} method will still work, + # returning the value of the Agent before the error. + # + # @param [Object] initial the initial value + # @param [Hash] opts the configuration options + # + # @option opts [Symbol] :error_mode either `:continue` or `:fail` + # @option opts [nil, Proc] :error_handler the (optional) error handler + # @option opts [nil, Proc] :validator the (optional) validation procedure + def initialize(initial, opts = {}) + super() + synchronize { ns_initialize(initial, opts) } + end + + # The current value (state) of the Agent, irrespective of any pending or + # in-progress actions. The value is always available and is non-blocking. + # + # @return [Object] the current value + def value + @current.value # TODO (pitr 12-Sep-2015): broken unsafe read? + end + + alias_method :deref, :value + + # When {#failed?} and {#error_mode} is `:fail`, returns the error object + # which caused the failure, else `nil`. When {#error_mode} is `:continue` + # will *always* return `nil`. 
+ # + # @return [nil, Error] the error which caused the failure when {#failed?} + def error + @error.value + end + + alias_method :reason, :error + + # @!macro agent_send + # + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Action dispatches are only allowed when the Agent + # is not {#failed?}. + # + # The action must be a block/proc/lambda which takes 1 or more arguments. + # The first argument is the current {#value} of the Agent. Any arguments + # passed to the send method via the `args` parameter will be passed to the + # action as the remaining arguments. The action must return the new value + # of the Agent. + # + # * {#send} and {#send!} should be used for actions that are CPU limited + # * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that + # may block on IO + # * {#send_via} and {#send_via!} are used when a specific executor is to + # be used for the action + # + # @param [Array] args zero or more arguments to be passed to + # the action + # @param [Proc] action the action dispatch to be enqueued + # + # @yield [agent, value, *args] process the old value and return the new + # @yieldparam [Object] value the current {#value} of the Agent + # @yieldparam [Array] args zero or more arguments to pass to the + # action + # @yieldreturn [Object] the new value of the Agent + # + # @!macro send_return + # @return [Boolean] true if the action is successfully enqueued, false if + # the Agent is {#failed?} + def send(*args, &action) + enqueue_action_job(action, args, Concurrent.global_fast_executor) + end + + # @!macro agent_send + # + # @!macro send_bang_return_and_raise + # @return [Boolean] true if the action is successfully enqueued + # @raise [Concurrent::Agent::Error] if the Agent is {#failed?} + def send!(*args, &action) + raise Error.new unless send(*args, &action) + true + end + + # @!macro agent_send + # @!macro 
send_return + def send_off(*args, &action) + enqueue_action_job(action, args, Concurrent.global_io_executor) + end + + alias_method :post, :send_off + + # @!macro agent_send + # @!macro send_bang_return_and_raise + def send_off!(*args, &action) + raise Error.new unless send_off(*args, &action) + true + end + + # @!macro agent_send + # @!macro send_return + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via(executor, *args, &action) + enqueue_action_job(action, args, executor) + end + + # @!macro agent_send + # @!macro send_bang_return_and_raise + # @param [Concurrent::ExecutorService] executor the executor on which the + # action is to be dispatched + def send_via!(executor, *args, &action) + raise Error.new unless send_via(executor, *args, &action) + true + end + + # Dispatches an action to the Agent and returns immediately. Subsequently, + # in a thread from a thread pool, the {#value} will be set to the return + # value of the action. Appropriate for actions that may block on IO. + # + # @param [Proc] action the action dispatch to be enqueued + # @return [Concurrent::Agent] self + # @see #send_off + def <<(action) + send_off(&action) + self + end + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far, from this thread or nested by the Agent, have occurred. Will + # block when {#failed?}. Will never return if a failed Agent is {#restart} + # with `:clear_actions` true. + # + # Returns a reference to `self` to support method chaining: + # + # ``` + # current_value = agent.await.value + # ``` + # + # @return [Boolean] self + # + # @!macro agent_await_warning + def await + wait(nil) + self + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. 
+ # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout) + wait(timeout.to_f) + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # + # @!macro agent_await_warning + def await_for!(timeout) + raise Concurrent::TimeoutError unless wait(timeout.to_f) + true + end + + # Blocks the current thread until all actions dispatched thus far, from this + # thread or nested by the Agent, have occurred, or the timeout (in seconds) + # has elapsed. Will block indefinitely when timeout is nil or not given. + # + # Provided mainly for consistency with other classes in this library. Prefer + # the various `await` methods instead. + # + # @param [Float] timeout the maximum number of seconds to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def wait(timeout = nil) + latch = Concurrent::CountDownLatch.new(1) + enqueue_await_job(latch) + latch.wait(timeout) + end + + # Is the Agent in a failed state? + # + # @see #restart + def failed? + !@error.value.nil? + end + + alias_method :stopped?, :failed? + + # When an Agent is {#failed?}, changes the Agent {#value} to `new_value` + # then un-fails the Agent so that action dispatches are allowed again. If + # the `:clear_actions` option is give and true, any actions queued on the + # Agent that were being held while it was failed will be discarded, + # otherwise those held actions will proceed. 
The `new_value` must pass the + # validator if any, or `restart` will raise an exception and the Agent will + # remain failed with its old {#value} and {#error}. Observers, if any, will + # not be notified of the new state. + # + # @param [Object] new_value the new value for the Agent once restarted + # @param [Hash] opts the configuration options + # @option opts [Symbol] :clear_actions true if all enqueued but unprocessed + # actions should be discarded on restart, else false (default: false) + # @return [Boolean] true + # + # @raise [Concurrent:AgentError] when not failed + def restart(new_value, opts = {}) + clear_actions = opts.fetch(:clear_actions, false) + synchronize do + raise Error.new('agent is not failed') unless failed? + raise ValidationError unless ns_validate(new_value) + @current.value = new_value + @error.value = nil + @queue.clear if clear_actions + ns_post_next_job unless @queue.empty? + end + true + end + + class << self + + # Blocks the current thread (indefinitely!) until all actions dispatched + # thus far to all the given Agents, from this thread or nested by the + # given Agents, have occurred. Will block when any of the agents are + # failed. Will never return if a failed Agent is restart with + # `:clear_actions` true. + # + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true + # + # @!macro agent_await_warning + def await(*agents) + agents.each { |agent| agent.await } + true + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. 
+ # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout else false + # + # @!macro agent_await_warning + def await_for(timeout, *agents) + end_at = Concurrent.monotonic_time + timeout.to_f + ok = agents.length.times do |i| + break false if (delay = end_at - Concurrent.monotonic_time) < 0 + break false unless agents[i].await_for(delay) + end + !!ok + end + + # Blocks the current thread until all actions dispatched thus far to all + # the given Agents, from this thread or nested by the given Agents, have + # occurred, or the timeout (in seconds) has elapsed. + # + # @param [Float] timeout the maximum number of seconds to wait + # @param [Array] agents the Agents on which to wait + # @return [Boolean] true if all actions complete before timeout + # + # @raise [Concurrent::TimeoutError] when timout is reached + # @!macro agent_await_warning + def await_for!(timeout, *agents) + raise Concurrent::TimeoutError unless await_for(timeout, *agents) + true + end + end + + private + + def ns_initialize(initial, opts) + @error_mode = opts[:error_mode] + @error_handler = opts[:error_handler] + + if @error_mode && !ERROR_MODES.include?(@error_mode) + raise ArgumentError.new('unrecognized error mode') + elsif @error_mode.nil? + @error_mode = @error_handler ? 
:continue : :fail + end + + @error_handler ||= DEFAULT_ERROR_HANDLER + @validator = opts.fetch(:validator, DEFAULT_VALIDATOR) + @current = Concurrent::AtomicReference.new(initial) + @error = Concurrent::AtomicReference.new(nil) + @caller = Concurrent::ThreadLocalVar.new(nil) + @queue = [] + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + def enqueue_action_job(action, args, executor) + raise ArgumentError.new('no action given') unless action + job = Job.new(action, args, executor, @caller.value || Thread.current.object_id) + synchronize { ns_enqueue_job(job) } + end + + def enqueue_await_job(latch) + synchronize do + if (index = ns_find_last_job_for_thread) + job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor, + Thread.current.object_id) + ns_enqueue_job(job, index+1) + else + latch.count_down + true + end + end + end + + def ns_enqueue_job(job, index = nil) + # a non-nil index means this is an await job + return false if index.nil? && failed? + index ||= @queue.length + @queue.insert(index, job) + # if this is the only job, post to executor + ns_post_next_job if @queue.length == 1 + true + end + + def ns_post_next_job + @queue.first.executor.post { execute_next_job } + end + + def execute_next_job + job = synchronize { @queue.first } + old_value = @current.value + + @caller.value = job.caller # for nested actions + new_value = job.action.call(old_value, *job.args) + @caller.value = nil + + return if new_value == AWAIT_FLAG + + if ns_validate(new_value) + @current.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + else + handle_error(ValidationError.new) + end + rescue => error + handle_error(error) + ensure + synchronize do + @queue.shift + unless failed? || @queue.empty? 
+ ns_post_next_job + end + end + end + + def ns_validate(value) + @validator.call(value) + rescue + false + end + + def handle_error(error) + # stop new jobs from posting + @error.value = error if @error_mode == :fail + @error_handler.call(self, error) + rescue + # do nothing + end + + def ns_find_last_job_for_thread + @queue.rindex { |job| job.caller == Thread.current.object_id } + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/array.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/array.rb new file mode 100644 index 0000000000..56931794ba --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/array.rb @@ -0,0 +1,66 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_array + # + # A thread-safe subclass of Array. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Array`. It reads array `a`, then it creates new `Concurrent::Array` + # which is concatenation of `a` and `b`, then it writes the concatenation to `a`. + # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#concat` instead. + # + # @see http://ruby-doc.org/core-2.2.0/Array.html Ruby standard library `Array` + + # @!macro internal_implementation_note + ArrayImplementation = case + when Concurrent.on_cruby? + # Because MRI never runs code in parallel, the existing + # non-thread-safe structures should usually work fine. + ::Array + + when Concurrent.on_jruby? 
+ require 'jruby/synchronized' + + class JRubyArray < ::Array + include JRuby::Synchronized + end + JRubyArray + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_rbx RbxArray + RbxArray + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyArray < ::Array + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray + TruffleRubyArray + + else + warn 'Possibly unsupported Ruby implementation' + ::Array + end + private_constant :ArrayImplementation + + # @!macro concurrent_array + class Array < ArrayImplementation + end + +end + diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/async.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/async.rb new file mode 100644 index 0000000000..9e75ca92e6 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/async.rb @@ -0,0 +1,445 @@ +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/synchronization/lockable_object' + +module Concurrent + + # A mixin module that provides simple asynchronous behavior to a class, + # turning it into a simple actor. Loosely based on Erlang's + # [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without + # supervision or linking. + # + # A more feature-rich {Concurrent::Actor} is also available when the + # capabilities of `Async` are too limited. 
+ # + # ```cucumber + # Feature: + # As a stateful, plain old Ruby class + # I want safe, asynchronous behavior + # So my long-running methods don't block the main thread + # ``` + # + # The `Async` module is a way to mix simple yet powerful asynchronous + # capabilities into any plain old Ruby object or class, turning each object + # into a simple Actor. Method calls are processed on a background thread. The + # caller is free to perform other actions while processing occurs in the + # background. + # + # Method calls to the asynchronous object are made via two proxy methods: + # `async` (alias `cast`) and `await` (alias `call`). These proxy methods post + # the method call to the object's background thread and return a "future" + # which will eventually contain the result of the method call. + # + # This behavior is loosely patterned after Erlang's `gen_server` behavior. + # When an Erlang module implements the `gen_server` behavior it becomes + # inherently asynchronous. The `start` or `start_link` function spawns a + # process (similar to a thread but much more lightweight and efficient) and + # returns the ID of the process. Using the process ID, other processes can + # send messages to the `gen_server` via the `cast` and `call` methods. Unlike + # Erlang's `gen_server`, however, `Async` classes do not support linking or + # supervision trees. + # + # ## Basic Usage + # + # When this module is mixed into a class, objects of the class become inherently + # asynchronous. Each object gets its own background thread on which to post + # asynchronous method calls. Asynchronous method calls are executed in the + # background one at a time in the order they are received. + # + # To create an asynchronous class, simply mix in the `Concurrent::Async` module: + # + # ``` + # class Hello + # include Concurrent::Async + # + # def hello(name) + # "Hello, #{name}!" 
+ # end + # end + # ``` + # + # When defining a constructor it is critical that the first line be a call to + # `super` with no arguments. The `super` method initializes the background + # thread and other asynchronous components. + # + # ``` + # class BackgroundLogger + # include Concurrent::Async + # + # def initialize(level) + # super() + # @logger = Logger.new(STDOUT) + # @logger.level = level + # end + # + # def info(msg) + # @logger.info(msg) + # end + # end + # ``` + # + # Mixing this module into a class provides each object two proxy methods: + # `async` and `await`. These methods are thread safe with respect to the + # enclosing object. The former proxy allows methods to be called + # asynchronously by posting to the object's internal thread. The latter proxy + # allows a method to be called synchronously but does so safely with respect + # to any pending asynchronous method calls and ensures proper ordering. Both + # methods return a {Concurrent::IVar} which can be inspected for the result + # of the proxied method call. Calling a method with `async` will return a + # `:pending` `IVar` whereas `await` will return a `:complete` `IVar`. + # + # ``` + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # ``` + # + # ## Let It Fail + # + # The `async` and `await` proxy methods have built-in error protection based + # on Erlang's famous "let it fail" philosophy. Instance methods should not be + # programmed defensively. 
When an exception is raised by a delegated method + # the proxy will rescue the exception, expose it to the caller as the `reason` + # attribute of the returned future, then process the next method call. + # + # ## Calling Methods Internally + # + # External method calls should *always* use the `async` and `await` proxy + # methods. When one method calls another method, the `async` proxy should + # rarely be used and the `await` proxy should *never* be used. + # + # When an object calls one of its own methods using the `await` proxy the + # second call will be enqueued *behind* the currently running method call. + # Any attempt to wait on the result will fail as the second call will never + # run until after the current call completes. + # + # Calling a method using the `await` proxy from within a method that was + # itself called using `async` or `await` will irreversibly deadlock the + # object. Do *not* do this, ever. + # + # ## Instance Variables and Attribute Accessors + # + # Instance variables do not need to be thread-safe so long as they are private. + # Asynchronous method calls are processed in the order they are received and + # are processed one at a time. Therefore private instance variables can only + # be accessed by one thread at a time. This is inherently thread-safe. + # + # When using private instance variables within asynchronous methods, the best + # practice is to read the instance variable into a local variable at the start + # of the method then update the instance variable at the *end* of the method. + # This way, should an exception be raised during method execution the internal + # state of the object will not have been changed. + # + # ### Reader Attributes + # + # The use of `attr_reader` is discouraged. Internal state exposed externally, + # when necessary, should be done through accessor methods. 
The instance + # variables exposed by these methods *must* be thread-safe, or they must be + # called using the `async` and `await` proxy methods. These two approaches are + # subtly different. + # + # When internal state is accessed via the `async` and `await` proxy methods, + # the returned value represents the object's state *at the time the call is + # processed*, which may *not* be the state of the object at the time the call + # is made. + # + # To get the state *at the current* time, irrespective of an enqueued method + # calls, a reader method must be called directly. This is inherently unsafe + # unless the instance variable is itself thread-safe, preferably using one + # of the thread-safe classes within this library. Because the thread-safe + # classes within this library are internally-locking or non-locking, they can + # be safely used from within asynchronous methods without causing deadlocks. + # + # Generally speaking, the best practice is to *not* expose internal state via + # reader methods. The best practice is to simply use the method's return value. + # + # ### Writer Attributes + # + # Writer attributes should never be used with asynchronous classes. Changing + # the state externally, even when done in the thread-safe way, is not logically + # consistent. Changes to state need to be timed with respect to all asynchronous + # method calls which my be in-process or enqueued. The only safe practice is to + # pass all necessary data to each method as arguments and let the method update + # the internal state as necessary. + # + # ## Class Constants, Variables, and Methods + # + # ### Class Constants + # + # Class constants do not need to be thread-safe. Since they are read-only and + # immutable they may be safely read both externally and from within + # asynchronous methods. + # + # ### Class Variables + # + # Class variables should be avoided. Class variables represent shared state. + # Shared state is anathema to concurrency. 
Should there be a need to share + # state using class variables they *must* be thread-safe, preferably + # using the thread-safe classes within this library. When updating class + # variables, never assign a new value/object to the variable itself. Assignment + # is not thread-safe in Ruby. Instead, use the thread-safe update functions + # of the variable itself to change the value. + # + # The best practice is to *never* use class variables with `Async` classes. + # + # ### Class Methods + # + # Class methods which are pure functions are safe. Class methods which modify + # class variables should be avoided, for all the reasons listed above. + # + # ## An Important Note About Thread Safe Guarantees + # + # > Thread safe guarantees can only be made when asynchronous method calls + # > are not mixed with direct method calls. Use only direct method calls + # > when the object is used exclusively on a single thread. Use only + # > `async` and `await` when the object is shared between threads. Once you + # > call a method using `async` or `await`, you should no longer call methods + # > directly on the object. Use `async` and `await` exclusively from then on. 
+ # + # @example + # + # class Echo + # include Concurrent::Async + # + # def echo(msg) + # print "#{msg}\n" + # end + # end + # + # horn = Echo.new + # horn.echo('zero') # synchronous, not thread-safe + # # returns the actual return value of the method + # + # horn.async.echo('one') # asynchronous, non-blocking, thread-safe + # # returns an IVar in the :pending state + # + # horn.await.echo('two') # synchronous, blocking, thread-safe + # # returns an IVar in the :complete state + # + # @see Concurrent::Actor + # @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia + # @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server + # @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/ + module Async + + # @!method self.new(*args, &block) + # + # Instanciate a new object and ensure proper initialization of the + # synchronization mechanisms. + # + # @param [Array] args Zero or more arguments to be passed to the + # object's initializer. + # @param [Proc] block Optional block to pass to the object's initializer. + # @return [Object] A properly initialized object of the asynchronous class. + + # Check for the presence of a method on an object and determine if a given + # set of arguments matches the required arity. + # + # @param [Object] obj the object to check against + # @param [Symbol] method the method to check the object for + # @param [Array] args zero or more arguments for the arity check + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + # + # @note This check is imperfect because of the way Ruby reports the arity of + # methods with a variable number of arguments. It is possible to determine + # if too few arguments are given but impossible to determine if too many + # arguments are given. 
This check may also fail to recognize dynamic behavior + # of the object, such as methods simulated with `method_missing`. + # + # @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity + # @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to? + # @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing + # + # @!visibility private + def self.validate_argc(obj, method, *args) + argc = args.length + arity = obj.method(method).arity + + if arity >= 0 && argc != arity + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})") + elsif arity < 0 && (arity = (arity + 1).abs) > argc + raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)") + end + end + + # @!visibility private + def self.included(base) + base.singleton_class.send(:alias_method, :original_new, :new) + base.extend(ClassMethods) + super(base) + end + + # @!visibility private + module ClassMethods + def new(*args, &block) + obj = original_new(*args, &block) + obj.send(:init_synchronization) + obj + end + end + private_constant :ClassMethods + + # Delegates asynchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AsyncDelegator < Synchronization::LockableObject + safe_initialization! + + # Create a new delegator object wrapping the given delegate. + # + # @param [Object] delegate the object to wrap and delegate method calls to + def initialize(delegate) + super() + @delegate = delegate + @queue = [] + @executor = Concurrent.global_io_executor + end + + # Delegates method calls to the wrapped object. 
+ # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + super unless @delegate.respond_to?(method) + Async::validate_argc(@delegate, method, *args) + + ivar = Concurrent::IVar.new + synchronize do + @queue.push [ivar, method, args, block] + @executor.post { perform } if @queue.length == 1 + end + + ivar + end + + # Perform all enqueued tasks. + # + # This method must be called from within the executor. It must not be + # called while already running. It will loop until the queue is empty. + def perform + loop do + ivar, method, args, block = synchronize { @queue.first } + break unless ivar # queue is empty + + begin + ivar.set(@delegate.send(method, *args, &block)) + rescue => error + ivar.fail(error) + end + + synchronize do + @queue.shift + return if @queue.empty? + end + end + end + end + private_constant :AsyncDelegator + + # Delegates synchronous, thread-safe method calls to the wrapped object. + # + # @!visibility private + class AwaitDelegator + + # Create a new delegator object wrapping the given delegate. + # + # @param [AsyncDelegator] delegate the object to wrap and delegate method calls to + def initialize(delegate) + @delegate = delegate + end + + # Delegates method calls to the wrapped object. 
+ # + # @param [Symbol] method the method being called + # @param [Array] args zero or more arguments to the method + # + # @return [IVar] the result of the method call + # + # @raise [NameError] the object does not respond to `method` method + # @raise [ArgumentError] the given `args` do not match the arity of `method` + def method_missing(method, *args, &block) + ivar = @delegate.send(method, *args, &block) + ivar.wait + ivar + end + end + private_constant :AwaitDelegator + + # Causes the chained method call to be performed asynchronously on the + # object's thread. The delegated method will return a future in the + # `:pending` state and the method call will have been scheduled on the + # object's thread. The final disposition of the method call can be obtained + # by inspecting the returned future. + # + # @!macro async_thread_safety_warning + # @note The method call is guaranteed to be thread safe with respect to + # all other method calls against the same object that are called with + # either `async` or `await`. The mutable nature of Ruby references + # (and object orientation in general) prevent any other thread safety + # guarantees. Do NOT mix direct method calls with delegated method calls. + # Use *only* delegated method calls when sharing the object between threads. + # + # @return [Concurrent::IVar] the pending result of the asynchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of + # the requested method + def async + @__async_delegator__ + end + alias_method :cast, :async + + # Causes the chained method call to be performed synchronously on the + # current thread. The delegated will return a future in either the + # `:fulfilled` or `:rejected` state and the delegated method will have + # completed. The final disposition of the delegated method can be obtained + # by inspecting the returned future. 
+ # + # @!macro async_thread_safety_warning + # + # @return [Concurrent::IVar] the completed result of the synchronous operation + # + # @raise [NameError] the object does not respond to the requested method + # @raise [ArgumentError] the given `args` do not match the arity of the + # requested method + def await + @__await_delegator__ + end + alias_method :call, :await + + # Initialize the internal serializer and other stnchronization mechanisms. + # + # @note This method *must* be called immediately upon object construction. + # This is the only way thread-safe initialization can be guaranteed. + # + # @!visibility private + def init_synchronization + return self if defined?(@__async_initialized__) && @__async_initialized__ + @__async_initialized__ = true + @__async_delegator__ = AsyncDelegator.new(self) + @__await_delegator__ = AwaitDelegator.new(@__async_delegator__) + self + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atom.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atom.rb new file mode 100644 index 0000000000..abef1cb0d4 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atom.rb @@ -0,0 +1,222 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +# @!macro thread_safe_variable_comparison +# +# ## Thread-safe Variable Classes +# +# Each of the thread-safe variable classes is designed to solve a different +# problem. In general: +# +# * *{Concurrent::Agent}:* Shared, mutable variable providing independent, +# uncoordinated, *asynchronous* change of individual values. Best used when +# the value will undergo frequent, complex updates. Suitable when the result +# of an update does not need to be known immediately. 
+# * *{Concurrent::Atom}:* Shared, mutable variable providing independent, +# uncoordinated, *synchronous* change of individual values. Best used when +# the value will undergo frequent reads but only occasional, though complex, +# updates. Suitable when the result of an update must be known immediately. +# * *{Concurrent::AtomicReference}:* A simple object reference that can be +# atomically. Updates are synchronous but fast. Best used when updates a +# simple set operations. Not suitable when updates are complex. +# {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar +# but optimized for the given data type. +# * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used +# when two or more threads need to exchange data. The threads will pair then +# block on each other until the exchange is complete. +# * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread +# must give a value to another, which must take the value. The threads will +# block on each other until the exchange is complete. +# * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which +# holds a different value for each thread which has access. Often used as +# an instance variable in objects which must maintain different state +# for different threads. +# * *{Concurrent::TVar}:* Shared, mutable variables which provide +# *coordinated*, *synchronous*, change of *many* stated. Used when multiple +# value must change together, in an all-or-nothing transaction. + + +module Concurrent + + # Atoms provide a way to manage shared, synchronous, independent state. + # + # An atom is initialized with an initial value and an optional validation + # proc. At any time the value of the atom can be synchronously and safely + # changed. If a validator is given at construction then any new value + # will be checked against the validator and will be rejected if the + # validator returns false or raises an exception. 
+ # + # There are two ways to change the value of an atom: {#compare_and_set} and + # {#swap}. The former will set the new value if and only if it validates and + # the current value matches the new value. The latter will atomically set the + # new value to the result of running the given block if and only if that + # value validates. + # + # ## Example + # + # ``` + # def next_fibonacci(set = nil) + # return [0, 1] if set.nil? + # set + [set[-2..-1].reduce{|sum,x| sum + x }] + # end + # + # # create an atom with an initial value + # atom = Concurrent::Atom.new(next_fibonacci) + # + # # send a few update requests + # 5.times do + # atom.swap{|set| next_fibonacci(set) } + # end + # + # # get the current value + # atom.value #=> [0, 1, 1, 2, 3, 5, 8] + # ``` + # + # ## Observation + # + # Atoms support observers through the {Concurrent::Observable} mixin module. + # Notification of observers occurs every time the value of the Atom changes. + # When notified the observer will receive three arguments: `time`, `old_value`, + # and `new_value`. The `time` argument is the time at which the value change + # occurred. The `old_value` is the value of the Atom when the change began + # The `new_value` is the value to which the Atom was set when the change + # completed. Note that `old_value` and `new_value` may be the same. This is + # not an error. It simply means that the change operation returned the same + # value. + # + # Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions. + # + # @!macro thread_safe_variable_comparison + # + # @see http://clojure.org/atoms Clojure Atoms + # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State + class Atom < Synchronization::Object + include Concern::Observable + + safe_initialization! 
+ attr_atomic(:value) + private :value=, :swap_value, :compare_and_set_value, :update_value + public :value + alias_method :deref, :value + + # @!method value + # The current value of the atom. + # + # @return [Object] The current value. + + # Create a new atom with the given initial value. + # + # @param [Object] value The initial value + # @param [Hash] opts The options used to configure the atom + # @option opts [Proc] :validator (nil) Optional proc used to validate new + # values. It must accept one and only one argument which will be the + # intended new value. The validator will return true if the new value + # is acceptable else return false (preferrably) or raise an exception. + # + # @!macro deref_options + # + # @raise [ArgumentError] if the validator is not a `Proc` (when given) + def initialize(value, opts = {}) + super() + @Validator = opts.fetch(:validator, -> v { true }) + self.observers = Collection::CopyOnNotifyObserverSet.new + self.value = value + end + + # Atomically swaps the value of atom using the given block. The current + # value will be passed to the block, as will any arguments passed as + # arguments to the function. The new value will be validated against the + # (optional) validator proc given at construction. If validation fails the + # value will not be changed. + # + # Internally, {#swap} reads the current value, applies the block to it, and + # attempts to compare-and-set it in. Since another thread may have changed + # the value in the intervening time, it may have to retry, and does so in a + # spin loop. The net effect is that the value will always be the result of + # the application of the supplied block to a current value, atomically. + # However, because the block might be called multiple times, it must be free + # of side effects. + # + # @note The given block may be called multiple times, and thus should be free + # of side effects. + # + # @param [Object] args Zero or more arguments passed to the block. 
+ # + # @yield [value, args] Calculates a new value for the atom based on the + # current value and any supplied arguments. + # @yieldparam value [Object] The current value of the atom. + # @yieldparam args [Object] All arguments passed to the function, in order. + # @yieldreturn [Object] The intended new value of the atom. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. + # + # @raise [ArgumentError] When no block is given. + def swap(*args) + raise ArgumentError.new('no block given') unless block_given? + + loop do + old_value = value + new_value = yield(old_value, *args) + begin + break old_value unless valid?(new_value) + break new_value if compare_and_set(old_value, new_value) + rescue + break old_value + end + end + end + + # Atomically sets the value of atom to the new value if and only if the + # current value of the atom is identical to the old value and the new + # value successfully validates against the (optional) validator given + # at construction. + # + # @param [Object] old_value The expected current value. + # @param [Object] new_value The intended new value. + # + # @return [Boolean] True if the value is changed else false. + def compare_and_set(old_value, new_value) + if valid?(new_value) && compare_and_set_value(old_value, new_value) + observers.notify_observers(Time.now, old_value, new_value) + true + else + false + end + end + + # Atomically sets the value of atom to the new value without regard for the + # current value so long as the new value successfully validates against the + # (optional) validator given at construction. + # + # @param [Object] new_value The intended new value. + # + # @return [Object] The final value of the atom after all operations and + # validations are complete. 
+ def reset(new_value) + old_value = value + if valid?(new_value) + self.value = new_value + observers.notify_observers(Time.now, old_value, new_value) + new_value + else + old_value + end + end + + private + + # Is the new value valid? + # + # @param [Object] new_value The intended new value. + # @return [Boolean] false if the validator function returns false or raises + # an exception else true + def valid?(new_value) + @Validator.call(new_value) + rescue + false + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/abstract_thread_local_var.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/abstract_thread_local_var.rb new file mode 100644 index 0000000000..fcdeed7f85 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/abstract_thread_local_var.rb @@ -0,0 +1,66 @@ +require 'concurrent/constants' + +module Concurrent + + # @!macro thread_local_var + # @!macro internal_implementation_note + # @!visibility private + class AbstractThreadLocalVar + + # @!macro thread_local_var_method_initialize + def initialize(default = nil, &default_block) + if default && block_given? + raise ArgumentError, "Cannot use both value and block as default value" + end + + if block_given? + @default_block = default_block + @default = nil + else + @default_block = nil + @default = default + end + + allocate_storage + end + + # @!macro thread_local_var_method_get + def value + raise NotImplementedError + end + + # @!macro thread_local_var_method_set + def value=(value) + raise NotImplementedError + end + + # @!macro thread_local_var_method_bind + def bind(value, &block) + if block_given? 
+ old_value = self.value + begin + self.value = value + yield + ensure + self.value = old_value + end + end + end + + protected + + # @!visibility private + def allocate_storage + raise NotImplementedError + end + + # @!visibility private + def default + if @default_block + self.value = @default_block.call + else + @default + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_boolean.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_boolean.rb new file mode 100644 index 0000000000..4da4419b48 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_boolean.rb @@ -0,0 +1,126 @@ +require 'concurrent/atomic/mutex_atomic_boolean' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_boolean_method_initialize + # + # Creates a new `AtomicBoolean` with the given initial value. + # + # @param [Boolean] initial the initial value + + # @!macro atomic_boolean_method_value_get + # + # Retrieves the current `Boolean` value. + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_value_set + # + # Explicitly sets the value. + # + # @param [Boolean] value the new value to be set + # + # @return [Boolean] the current value + + # @!macro atomic_boolean_method_true_question + # + # Is the current value `true` + # + # @return [Boolean] true if the current value is `true`, else false + + # @!macro atomic_boolean_method_false_question + # + # Is the current value `false` + # + # @return [Boolean] true if the current value is `false`, else false + + # @!macro atomic_boolean_method_make_true + # + # Explicitly sets the value to true. 
+ # + # @return [Boolean] true is value has changed, otherwise false + + # @!macro atomic_boolean_method_make_false + # + # Explicitly sets the value to false. + # + # @return [Boolean] true is value has changed, otherwise false + + ################################################################### + + # @!macro atomic_boolean_public_api + # + # @!method initialize(initial = false) + # @!macro atomic_boolean_method_initialize + # + # @!method value + # @!macro atomic_boolean_method_value_get + # + # @!method value=(value) + # @!macro atomic_boolean_method_value_set + # + # @!method true? + # @!macro atomic_boolean_method_true_question + # + # @!method false? + # @!macro atomic_boolean_method_false_question + # + # @!method make_true + # @!macro atomic_boolean_method_make_true + # + # @!method make_false + # @!macro atomic_boolean_method_make_false + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicBooleanImplementation = case + when defined?(JavaAtomicBoolean) + JavaAtomicBoolean + when defined?(CAtomicBoolean) + CAtomicBoolean + else + MutexAtomicBoolean + end + private_constant :AtomicBooleanImplementation + + # @!macro atomic_boolean + # + # A boolean value that can be updated atomically. Reads and writes to an atomic + # boolean and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicBoolean... + # 2.790000 0.000000 2.790000 ( 2.791454) + # Testing with Concurrent::CAtomicBoolean... + # 0.740000 0.000000 0.740000 ( 0.740206) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicBoolean... + # 5.240000 2.520000 7.760000 ( 3.683000) + # Testing with Concurrent::JavaAtomicBoolean... 
+ # 3.340000 0.010000 3.350000 ( 0.855000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean + # + # @!macro atomic_boolean_public_api + class AtomicBoolean < AtomicBooleanImplementation + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_fixnum.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_fixnum.rb new file mode 100644 index 0000000000..553aae06e5 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_fixnum.rb @@ -0,0 +1,143 @@ +require 'concurrent/atomic/mutex_atomic_fixnum' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro atomic_fixnum_method_initialize + # + # Creates a new `AtomicFixnum` with the given initial value. + # + # @param [Fixnum] initial the initial value + # @raise [ArgumentError] if the initial value is not a `Fixnum` + + # @!macro atomic_fixnum_method_value_get + # + # Retrieves the current `Fixnum` value. + # + # @return [Fixnum] the current value + + # @!macro atomic_fixnum_method_value_set + # + # Explicitly sets the value. + # + # @param [Fixnum] value the new value to be set + # + # @return [Fixnum] the current value + # + # @raise [ArgumentError] if the new value is not a `Fixnum` + + # @!macro atomic_fixnum_method_increment + # + # Increases the current value by the given amount (defaults to 1). 
+ # + # @param [Fixnum] delta the amount by which to increase the current value + # + # @return [Fixnum] the current value after incrementation + + # @!macro atomic_fixnum_method_decrement + # + # Decreases the current value by the given amount (defaults to 1). + # + # @param [Fixnum] delta the amount by which to decrease the current value + # + # @return [Fixnum] the current value after decrementation + + # @!macro atomic_fixnum_method_compare_and_set + # + # Atomically sets the value to the given updated value if the current + # value == the expected value. + # + # @param [Fixnum] expect the expected value + # @param [Fixnum] update the new value + # + # @return [Boolean] true if the value was updated else false + + # @!macro atomic_fixnum_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # + # @return [Object] the new value + + ################################################################### + + # @!macro atomic_fixnum_public_api + # + # @!method initialize(initial = 0) + # @!macro atomic_fixnum_method_initialize + # + # @!method value + # @!macro atomic_fixnum_method_value_get + # + # @!method value=(value) + # @!macro atomic_fixnum_method_value_set + # + # @!method increment(delta) + # @!macro atomic_fixnum_method_increment + # + # @!method decrement(delta) + # @!macro atomic_fixnum_method_decrement + # + # @!method compare_and_set(expect, update) + # @!macro atomic_fixnum_method_compare_and_set + # + # @!method update + # @!macro atomic_fixnum_method_update + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + AtomicFixnumImplementation = case + when 
defined?(JavaAtomicFixnum) + JavaAtomicFixnum + when defined?(CAtomicFixnum) + CAtomicFixnum + else + MutexAtomicFixnum + end + private_constant :AtomicFixnumImplementation + + # @!macro atomic_fixnum + # + # A numeric value that can be updated atomically. Reads and writes to an atomic + # fixnum and thread-safe and guaranteed to succeed. Reads and writes may block + # briefly but no explicit locking is required. + # + # @!macro thread_safe_variable_comparison + # + # Performance: + # + # ``` + # Testing with ruby 2.1.2 + # Testing with Concurrent::MutexAtomicFixnum... + # 3.130000 0.000000 3.130000 ( 3.136505) + # Testing with Concurrent::CAtomicFixnum... + # 0.790000 0.000000 0.790000 ( 0.785550) + # + # Testing with jruby 1.9.3 + # Testing with Concurrent::MutexAtomicFixnum... + # 5.460000 2.460000 7.920000 ( 3.715000) + # Testing with Concurrent::JavaAtomicFixnum... + # 4.520000 0.030000 4.550000 ( 1.187000) + # ``` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong + # + # @!macro atomic_fixnum_public_api + class AtomicFixnum < AtomicFixnumImplementation + # @return [String] Short string representation. + def to_s + format '%s value:%s>', super[0..-2], value + end + + alias_method :inspect, :to_s + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_markable_reference.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_markable_reference.rb new file mode 100644 index 0000000000..f20cd46a52 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_markable_reference.rb @@ -0,0 +1,164 @@ +module Concurrent + # An atomic reference which maintains an object reference along with a mark bit + # that can be updated atomically. 
+ # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html + # java.util.concurrent.atomic.AtomicMarkableReference + class AtomicMarkableReference < ::Concurrent::Synchronization::Object + + attr_atomic(:reference) + private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference + + def initialize(value = nil, mark = false) + super() + self.reference = immutable_array(value, mark) + end + + # Atomically sets the value and mark to the given updated value and + # mark given both: + # - the current value == the expected value && + # - the current mark == the expected mark + # + # @param [Object] expected_val the expected value + # @param [Object] new_val the new value + # @param [Boolean] expected_mark the expected mark + # @param [Boolean] new_mark the new mark + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value or the + # actual mark was not equal to the expected mark + def compare_and_set(expected_val, new_val, expected_mark, new_mark) + # Memoize a valid reference to the current AtomicReference for + # later comparison. + current = reference + curr_val, curr_mark = current + + # Ensure that that the expected marks match. + return false unless expected_mark == curr_mark + + if expected_val.is_a? Numeric + # If the object is a numeric, we need to ensure we are comparing + # the numerical values + return false unless expected_val == curr_val + else + # Otherwise, we need to ensure we are comparing the object identity. + # Theoretically, this could be incorrect if a user monkey-patched + # `Object#equal?`, but they should know that they are playing with + # fire at that point. + return false unless expected_val.equal? 
curr_val + end + + prospect = immutable_array(new_val, new_mark) + + compare_and_set_reference current, prospect + end + + alias_method :compare_and_swap, :compare_and_set + + # Gets the current reference and marked values. + # + # @return [Array] the current reference and marked values + def get + reference + end + + # Gets the current value of the reference + # + # @return [Object] the current value of the reference + def value + reference[0] + end + + # Gets the current marked value + # + # @return [Boolean] the current marked value + def mark + reference[1] + end + + alias_method :marked?, :mark + + # _Unconditionally_ sets to the given value of both the reference and + # the mark. + # + # @param [Object] new_val the new value + # @param [Boolean] new_mark the new mark + # + # @return [Array] both the new value and the new mark + def set(new_val, new_mark) + self.reference = immutable_array(new_val, new_mark) + end + + # Pass the current value and marked state to the given block, replacing it + # with the block's results. May retry if the value changes during the + # block's execution. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and new mark + def update + loop do + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + if compare_and_set old_val, new_val, old_mark, new_mark + return immutable_array(new_val, new_mark) + end + end + end + + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. 
+ # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state + # + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + unless compare_and_set old_val, new_val, old_mark, new_mark + fail ::Concurrent::ConcurrentUpdateError, + 'AtomicMarkableReference: Update failed due to race condition.', + 'Note: If you would like to guarantee an update, please use ' + + 'the `AtomicMarkableReference#update` method.' + end + + immutable_array(new_val, new_mark) + end + + # Pass the current value to the given block, replacing it with the + # block's result. Simply return nil if update fails. + # + # @yield [Object] Calculate a new value and marked state for the atomic + # reference using given (old) value and (old) marked + # @yieldparam [Object] old_val the starting value of the atomic reference + # @yieldparam [Boolean] old_mark the starting state of marked + # + # @return [Array] the new value and marked state, or nil if + # the update failed + def try_update + old_val, old_mark = reference + new_val, new_mark = yield old_val, old_mark + + return unless compare_and_set old_val, new_val, old_mark, new_mark + + immutable_array(new_val, new_mark) + end + + private + + def immutable_array(*args) + args.freeze + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_reference.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_reference.rb new file mode 100644 index 0000000000..620c0698e7 --- /dev/null +++ 
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/atomic_reference.rb @@ -0,0 +1,204 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/engine' +require 'concurrent/atomic_reference/numeric_cas_wrapper' + +# Shim for TruffleRuby::AtomicReference +if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference) + # @!visibility private + module TruffleRuby + AtomicReference = Truffle::AtomicReference + end +end + +module Concurrent + + # Define update methods that use direct paths + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicDirectUpdate + + # @!macro atomic_reference_method_update + # + # Pass the current value to the given block, replacing it + # with the block's result. May retry if the value changes + # during the block's execution. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @return [Object] the new value + def update + true until compare_and_set(old_value = get, new_value = yield(old_value)) + new_value + end + + # @!macro atomic_reference_method_try_update + # + # Pass the current value to the given block, replacing it + # with the block's result. Return nil if the update fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This method was altered to avoid raising an exception by default. + # Instead, this method now returns `nil` in case of failure. 
For more info, + # please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value, or nil if update failed + def try_update + old_value = get + new_value = yield old_value + + return unless compare_and_set old_value, new_value + + new_value + end + + # @!macro atomic_reference_method_try_update! + # + # Pass the current value to the given block, replacing it + # with the block's result. Raise an exception if the update + # fails. + # + # @yield [Object] Calculate a new value for the atomic reference using + # given (old) value + # @yieldparam [Object] old_value the starting value of the atomic reference + # @note This behavior mimics the behavior of the original + # `AtomicReference#try_update` API. The reason this was changed was to + # avoid raising exceptions (which are inherently slow) by default. For more + # info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 + # @return [Object] the new value + # @raise [Concurrent::ConcurrentUpdateError] if the update fails + def try_update! + old_value = get + new_value = yield old_value + unless compare_and_set(old_value, new_value) + if $VERBOSE + raise ConcurrentUpdateError, "Update failed" + else + raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE + end + end + new_value + end + end + + require 'concurrent/atomic_reference/mutex_atomic' + + # @!macro atomic_reference + # + # An object reference that may be updated atomically. All read and write + # operations have java volatile semantic. + # + # @!macro thread_safe_variable_comparison + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html + # + # @!method initialize(value = nil) + # @!macro atomic_reference_method_initialize + # @param [Object] value The initial value. 
+ # + # @!method get + # @!macro atomic_reference_method_get + # Gets the current value. + # @return [Object] the current value + # + # @!method set(new_value) + # @!macro atomic_reference_method_set + # Sets to the given value. + # @param [Object] new_value the new value + # @return [Object] the new value + # + # @!method get_and_set(new_value) + # @!macro atomic_reference_method_get_and_set + # Atomically sets to the given value and returns the old value. + # @param [Object] new_value the new value + # @return [Object] the old value + # + # @!method compare_and_set(old_value, new_value) + # @!macro atomic_reference_method_compare_and_set + # + # Atomically sets the value to the given updated value if + # the current value == the expected value. + # + # @param [Object] old_value the expected value + # @param [Object] new_value the new value + # + # @return [Boolean] `true` if successful. A `false` return indicates + # that the actual value was not equal to the expected value. + # + # @!method update + # @!macro atomic_reference_method_update + # + # @!method try_update + # @!macro atomic_reference_method_try_update + # + # @!method try_update! + # @!macro atomic_reference_method_try_update! + + + # @!macro internal_implementation_note + class ConcurrentUpdateError < ThreadError + # frozen pre-allocated backtrace to speed ConcurrentUpdateError + CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze + end + + # @!macro internal_implementation_note + AtomicReferenceImplementation = case + when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? + # @!visibility private + # @!macro internal_implementation_note + class CAtomicReference + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + end + CAtomicReference + when Concurrent.on_jruby? 
+ # @!visibility private + # @!macro internal_implementation_note + class JavaAtomicReference + include AtomicDirectUpdate + end + JavaAtomicReference + when Concurrent.on_truffleruby? + class TruffleRubyAtomicReference < TruffleRuby::AtomicReference + include AtomicDirectUpdate + alias_method :value, :get + alias_method :value=, :set + alias_method :compare_and_swap, :compare_and_set + alias_method :swap, :get_and_set + end + when Concurrent.on_rbx? + # @note Extends `Rubinius::AtomicReference` version adding aliases + # and numeric logic. + # + # @!visibility private + # @!macro internal_implementation_note + class RbxAtomicReference < Rubinius::AtomicReference + alias_method :_compare_and_set, :compare_and_set + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :value, :get + alias_method :value=, :set + alias_method :swap, :get_and_set + alias_method :compare_and_swap, :compare_and_set + end + RbxAtomicReference + else + MutexAtomicReference + end + private_constant :AtomicReferenceImplementation + + # @!macro atomic_reference + class AtomicReference < AtomicReferenceImplementation + + # @return [String] Short string representation. 
+ def to_s + format '%s value:%s>', super[0..-2], get + end + + alias_method :inspect, :to_s + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/count_down_latch.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/count_down_latch.rb new file mode 100644 index 0000000000..4c0158d755 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/count_down_latch.rb @@ -0,0 +1,100 @@ +require 'concurrent/atomic/mutex_count_down_latch' +require 'concurrent/atomic/java_count_down_latch' +require 'concurrent/utility/engine' + +module Concurrent + + ################################################################### + + # @!macro count_down_latch_method_initialize + # + # Create a new `CountDownLatch` with the initial `count`. + # + # @param [new] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro count_down_latch_method_wait + # + # Block on the latch until the counter reaches zero or until `timeout` is reached. + # + # @param [Fixnum] timeout the number of seconds to wait for the counter or `nil` + # to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on `timeout` + + # @!macro count_down_latch_method_count_down + # + # Signal the latch to decrement the counter. Will signal all blocked threads when + # the `count` reaches zero. + + # @!macro count_down_latch_method_count + # + # The current value of the counter. 
+ # + # @return [Fixnum] the current value of the counter + + ################################################################### + + # @!macro count_down_latch_public_api + # + # @!method initialize(count = 1) + # @!macro count_down_latch_method_initialize + # + # @!method wait(timeout = nil) + # @!macro count_down_latch_method_wait + # + # @!method count_down + # @!macro count_down_latch_method_count_down + # + # @!method count + # @!macro count_down_latch_method_count + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + CountDownLatchImplementation = case + when Concurrent.on_jruby? + JavaCountDownLatch + else + MutexCountDownLatch + end + private_constant :CountDownLatchImplementation + + # @!macro count_down_latch + # + # A synchronization object that allows one thread to wait on multiple other threads. + # The thread that will wait creates a `CountDownLatch` and sets the initial value + # (normally equal to the number of other threads). The initiating thread passes the + # latch to the other threads then waits for the other threads by calling the `#wait` + # method. Each of the other threads calls `#count_down` when done with its work. + # When the latch counter reaches zero the waiting thread is unblocked and continues + # with its work. A `CountDownLatch` can be used only once. Its value cannot be reset. 
+ # + # @!macro count_down_latch_public_api + # @example Waiter and Decrementer + # latch = Concurrent::CountDownLatch.new(3) + # + # waiter = Thread.new do + # latch.wait() + # puts ("Waiter released") + # end + # + # decrementer = Thread.new do + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # + # sleep(1) + # latch.count_down + # puts latch.count + # end + # + # [waiter, decrementer].each(&:join) + class CountDownLatch < CountDownLatchImplementation + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/cyclic_barrier.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/cyclic_barrier.rb new file mode 100644 index 0000000000..42f5a94967 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/cyclic_barrier.rb @@ -0,0 +1,128 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # A synchronization aid that allows a set of threads to all wait for each + # other to reach a common barrier point. 
+ # @example + # barrier = Concurrent::CyclicBarrier.new(3) + # jobs = Array.new(3) { |i| -> { sleep i; p done: i } } + # process = -> (i) do + # # waiting to start at the same time + # barrier.wait + # # execute job + # jobs[i].call + # # wait for others to finish + # barrier.wait + # end + # threads = 2.times.map do |i| + # Thread.new(i, &process) + # end + # + # # use main as well + # process.call 2 + # + # # here we can be sure that all jobs are processed + class CyclicBarrier < Synchronization::LockableObject + + # @!visibility private + Generation = Struct.new(:status) + private_constant :Generation + + # Create a new `CyclicBarrier` that waits for `parties` threads + # + # @param [Fixnum] parties the number of parties + # @yield an optional block that will be executed that will be executed after + # the last thread arrives and before the others are released + # + # @raise [ArgumentError] if `parties` is not an integer or is less than zero + def initialize(parties, &block) + Utility::NativeInteger.ensure_integer_and_bounds parties + Utility::NativeInteger.ensure_positive_and_no_zero parties + + super(&nil) + synchronize { ns_initialize parties, &block } + end + + # @return [Fixnum] the number of threads needed to pass the barrier + def parties + synchronize { @parties } + end + + # @return [Fixnum] the number of threads currently waiting on the barrier + def number_waiting + synchronize { @number_waiting } + end + + # Blocks on the barrier until the number of waiting threads is equal to + # `parties` or until `timeout` is reached or `reset` is called + # If a block has been passed to the constructor, it will be executed once by + # the last arrived thread before releasing the others + # @param [Fixnum] timeout the number of seconds to wait for the counter or + # `nil` to block indefinitely + # @return [Boolean] `true` if the `count` reaches zero else false on + # `timeout` or on `reset` or if the barrier is broken + def wait(timeout = nil) + synchronize do + 
+ return false unless @generation.status == :waiting + + @number_waiting += 1 + + if @number_waiting == @parties + @action.call if @action + ns_generation_done @generation, :fulfilled + true + else + generation = @generation + if ns_wait_until(timeout) { generation.status != :waiting } + generation.status == :fulfilled + else + ns_generation_done generation, :broken, false + false + end + end + end + end + + # resets the barrier to its initial state + # If there is at least one waiting thread, it will be woken up, the `wait` + # method will return false and the barrier will be broken + # If the barrier is broken, this method restores it to the original state + # + # @return [nil] + def reset + synchronize { ns_generation_done @generation, :reset } + end + + # A barrier can be broken when: + # - a thread called the `reset` method while at least one other thread was waiting + # - at least one thread timed out on `wait` method + # + # A broken barrier can be restored using `reset` it's safer to create a new one + # @return [Boolean] true if the barrier is broken otherwise false + def broken? 
+ synchronize { @generation.status != :waiting } + end + + protected + + def ns_generation_done(generation, status, continue = true) + generation.status = status + ns_next_generation if continue + ns_broadcast + end + + def ns_next_generation + @generation = Generation.new(:waiting) + @number_waiting = 0 + end + + def ns_initialize(parties, &block) + @parties = parties + @action = block + ns_next_generation + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/event.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/event.rb new file mode 100644 index 0000000000..825f38a031 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/event.rb @@ -0,0 +1,109 @@ +require 'thread' +require 'concurrent/synchronization' + +module Concurrent + + # Old school kernel-style event reminiscent of Win32 programming in C++. + # + # When an `Event` is created it is in the `unset` state. Threads can choose to + # `#wait` on the event, blocking until released by another thread. When one + # thread wants to alert all blocking threads it calls the `#set` method which + # will then wake up all listeners. Once an `Event` has been set it remains set. + # New threads calling `#wait` will return immediately. An `Event` may be + # `#reset` at any time once it has been set. + # + # @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx + # @example + # event = Concurrent::Event.new + # + # t1 = Thread.new do + # puts "t1 is waiting" + # event.wait(1) + # puts "event ocurred" + # end + # + # t2 = Thread.new do + # puts "t2 calling set" + # event.set + # end + # + # [t1, t2].each(&:join) + # + # # prints: + # # t2 calling set + # # t1 is waiting + # # event occurred + class Event < Synchronization::LockableObject + + # Creates a new `Event` in the unset state. 
Threads calling `#wait` on the + # `Event` will block. + def initialize + super + synchronize { ns_initialize } + end + + # Is the object in the set state? + # + # @return [Boolean] indicating whether or not the `Event` has been set + def set? + synchronize { @set } + end + + # Trigger the event, setting the state to `set` and releasing all threads + # waiting on the event. Has no effect if the `Event` has already been set. + # + # @return [Boolean] should always return `true` + def set + synchronize { ns_set } + end + + def try? + synchronize { @set ? false : ns_set } + end + + # Reset a previously set event back to the `unset` state. + # Has no effect if the `Event` has not yet been set. + # + # @return [Boolean] should always return `true` + def reset + synchronize do + if @set + @set = false + @iteration +=1 + end + true + end + end + + # Wait a given number of seconds for the `Event` to be set by another + # thread. Will wait forever when no `timeout` value is given. Returns + # immediately if the `Event` has already been set. + # + # @return [Boolean] true if the `Event` was set before timeout else false + def wait(timeout = nil) + synchronize do + unless @set + iteration = @iteration + ns_wait_until(timeout) { iteration < @iteration || @set } + else + true + end + end + end + + protected + + def ns_set + unless @set + @set = true + ns_broadcast + end + true + end + + def ns_initialize + @set = false + @iteration = 0 + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_count_down_latch.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_count_down_latch.rb new file mode 100644 index 0000000000..cb5b35a567 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_count_down_latch.rb @@ -0,0 +1,42 @@ +if Concurrent.on_jruby? 
+ + module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class JavaCountDownLatch + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds(count) + Utility::NativeInteger.ensure_positive(count) + @latch = java.util.concurrent.CountDownLatch.new(count) + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly { @latch.await } + result = true + else + Synchronization::JRuby.sleep_interruptibly do + result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + end + + # @!macro count_down_latch_method_count_down + def count_down + @latch.countDown + end + + # @!macro count_down_latch_method_count + def count + @latch.getCount + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_thread_local_var.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_thread_local_var.rb new file mode 100644 index 0000000000..b41018ffed --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/java_thread_local_var.rb @@ -0,0 +1,37 @@ +require 'concurrent/atomic/abstract_thread_local_var' + +if Concurrent.on_jruby? + + module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class JavaThreadLocalVar < AbstractThreadLocalVar + + # @!macro thread_local_var_method_get + def value + value = @var.get + + if value.nil? 
+ default + elsif value == NULL + nil + else + value + end + end + + # @!macro thread_local_var_method_set + def value=(value) + @var.set(value) + end + + protected + + # @!visibility private + def allocate_storage + @var = java.lang.ThreadLocal.new + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_boolean.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_boolean.rb new file mode 100644 index 0000000000..a033de4cad --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_boolean.rb @@ -0,0 +1,62 @@ +require 'concurrent/synchronization' + +module Concurrent + + # @!macro atomic_boolean + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicBoolean < Synchronization::LockableObject + + # @!macro atomic_boolean_method_initialize + def initialize(initial = false) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_boolean_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_boolean_method_value_set + def value=(value) + synchronize { @value = !!value } + end + + # @!macro atomic_boolean_method_true_question + def true? + synchronize { @value } + end + + # @!macro atomic_boolean_method_false_question + def false? 
+ synchronize { !@value } + end + + # @!macro atomic_boolean_method_make_true + def make_true + synchronize { ns_make_value(true) } + end + + # @!macro atomic_boolean_method_make_false + def make_false + synchronize { ns_make_value(false) } + end + + protected + + # @!visibility private + def ns_initialize(initial) + @value = !!initial + end + + private + + # @!visibility private + def ns_make_value(value) + old = @value + @value = value + old != @value + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_fixnum.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_fixnum.rb new file mode 100644 index 0000000000..77b91d2dbf --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_atomic_fixnum.rb @@ -0,0 +1,75 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro atomic_fixnum + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicFixnum < Synchronization::LockableObject + + # @!macro atomic_fixnum_method_initialize + def initialize(initial = 0) + super() + synchronize { ns_initialize(initial) } + end + + # @!macro atomic_fixnum_method_value_get + def value + synchronize { @value } + end + + # @!macro atomic_fixnum_method_value_set + def value=(value) + synchronize { ns_set(value) } + end + + # @!macro atomic_fixnum_method_increment + def increment(delta = 1) + synchronize { ns_set(@value + delta.to_i) } + end + + alias_method :up, :increment + + # @!macro atomic_fixnum_method_decrement + def decrement(delta = 1) + synchronize { ns_set(@value - delta.to_i) } + end + + alias_method :down, :decrement + + # @!macro atomic_fixnum_method_compare_and_set + def compare_and_set(expect, update) + synchronize do + if @value == expect.to_i + @value = 
update.to_i + true + else + false + end + end + end + + # @!macro atomic_fixnum_method_update + def update + synchronize do + @value = yield @value + end + end + + protected + + # @!visibility private + def ns_initialize(initial) + ns_set(initial) + end + + private + + # @!visibility private + def ns_set(value) + Utility::NativeInteger.ensure_integer_and_bounds value + @value = value + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_count_down_latch.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_count_down_latch.rb new file mode 100644 index 0000000000..e99744cef6 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_count_down_latch.rb @@ -0,0 +1,44 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro count_down_latch + # @!visibility private + # @!macro internal_implementation_note + class MutexCountDownLatch < Synchronization::LockableObject + + # @!macro count_down_latch_method_initialize + def initialize(count = 1) + Utility::NativeInteger.ensure_integer_and_bounds count + Utility::NativeInteger.ensure_positive count + + super() + synchronize { ns_initialize count } + end + + # @!macro count_down_latch_method_wait + def wait(timeout = nil) + synchronize { ns_wait_until(timeout) { @count == 0 } } + end + + # @!macro count_down_latch_method_count_down + def count_down + synchronize do + @count -= 1 if @count > 0 + ns_broadcast if @count == 0 + end + end + + # @!macro count_down_latch_method_count + def count + synchronize { @count } + end + + protected + + def ns_initialize(count) + @count = count + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_semaphore.rb 
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_semaphore.rb new file mode 100644 index 0000000000..2042f73056 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/mutex_semaphore.rb @@ -0,0 +1,115 @@ +require 'concurrent/synchronization' +require 'concurrent/utility/native_integer' + +module Concurrent + + # @!macro semaphore + # @!visibility private + # @!macro internal_implementation_note + class MutexSemaphore < Synchronization::LockableObject + + # @!macro semaphore_method_initialize + def initialize(count) + Utility::NativeInteger.ensure_integer_and_bounds count + + super() + synchronize { ns_initialize count } + end + + # @!macro semaphore_method_acquire + def acquire(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + try_acquire_timed(permits, nil) + nil + end + end + + # @!macro semaphore_method_available_permits + def available_permits + synchronize { @free } + end + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. + # + # @return [Integer] + def drain_permits + synchronize do + @free.tap { |_| @free = 0 } + end + end + + # @!macro semaphore_method_try_acquire + def try_acquire(permits = 1, timeout = nil) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + if timeout.nil? + try_acquire_now(permits) + else + try_acquire_timed(permits, timeout) + end + end + end + + # @!macro semaphore_method_release + def release(permits = 1) + Utility::NativeInteger.ensure_integer_and_bounds permits + Utility::NativeInteger.ensure_positive permits + + synchronize do + @free += permits + permits.times { ns_signal } + end + nil + end + + # Shrinks the number of available permits by the indicated reduction. 
+ # + # @param [Fixnum] reduction Number of permits to remove. + # + # @raise [ArgumentError] if `reduction` is not an integer or is negative + # + # @raise [ArgumentError] if `@free` - `@reduction` is less than zero + # + # @return [nil] + # + # @!visibility private + def reduce_permits(reduction) + Utility::NativeInteger.ensure_integer_and_bounds reduction + Utility::NativeInteger.ensure_positive reduction + + synchronize { @free -= reduction } + nil + end + + protected + + # @!visibility private + def ns_initialize(count) + @free = count + end + + private + + # @!visibility private + def try_acquire_now(permits) + if @free >= permits + @free -= permits + true + else + false + end + end + + # @!visibility private + def try_acquire_timed(permits, timeout) + ns_wait_until(timeout) { try_acquire_now(permits) } + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/read_write_lock.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/read_write_lock.rb new file mode 100644 index 0000000000..246f21aac3 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/read_write_lock.rb @@ -0,0 +1,254 @@ +require 'thread' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # Ruby read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And if the "write" lock is taken, any readers who come along will have to wait) + # + # If readers are already active when a writer comes along, the writer will wait for + # all the readers to finish before going ahead. + # Any additional readers that come when the writer is already waiting, will also + # wait (so writers are not starved). 
+ # + # This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`. + # + # @example + # lock = Concurrent::ReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @note Do **not** try to acquire the write lock while already holding a read lock + # **or** try to acquire the write lock while you already have it. + # This will lead to deadlock + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReadWriteLock < Synchronization::Object + + # @!visibility private + WAITING_WRITER = 1 << 15 + + # @!visibility private + RUNNING_WRITER = 1 << 29 + + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + safe_initialization! + + # Implementation notes: + # A goal is to make the uncontended path for both readers/writers lock-free + # Only if there is reader-writer or writer-writer contention, should locks be used + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each reader increments the counter by 1 when acquiring a read lock + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + + # Create a new `ReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadLock = Synchronization::Lock.new + @WriteLock = Synchronization::Lock.new + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. 
+ # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock has been acquired will block until + # it is released. Will not block if other read locks have been acquired. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def acquire_read_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting when we first queue up, we need to wait + if waiting_writer?(c) + @ReadLock.wait_until { !waiting_writer? } + + # after a reader has waited once, they are allowed to "barge" ahead of waiting writers + # but if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadLock.wait_until { !running_writer? } + else + return if @Counter.compare_and_set(c, c+1) + end + end + else + break if @Counter.compare_and_set(c, c+1) + end + end + true + end + + # Release a previously acquired read lock. 
+ # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + while true + c = @Counter.value + if @Counter.compare_and_set(c, c-1) + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_writer?(c) && running_readers(c) == 1 + @WriteLock.signal + end + break + end + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. + def acquire_write_lock + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + if c == 0 # no readers OR writers running + # if we successfully swap the RUNNING_WRITER bit on, then we can go ahead + break if @Counter.compare_and_set(0, RUNNING_WRITER) + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here, OR another writer could increment + @WriteLock.wait_until do + # So we have to do another check inside the synchronized section + # If a writer OR reader is running, then go to sleep + c = @Counter.value + !running_writer?(c) && !running_readers?(c) + end + + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # Then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + end + break + end + end + true + end + + # Release a previously acquired write lock. + # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + return true unless running_writer? 
+ c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadLock.broadcast + @WriteLock.signal if waiting_writers(c) > 0 + true + end + + # Queries if the write lock is held by any thread. + # + # @return [Boolean] true if the write lock is held else false` + def write_locked? + @Counter.value >= RUNNING_WRITER + end + + # Queries whether any threads are waiting to acquire the read or write lock. + # + # @return [Boolean] true if any threads are waiting for a lock else false + def has_waiters? + waiting_writer?(@Counter.value) + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) / WAITING_WRITER + end + + # @!visibility private + def waiting_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/reentrant_read_write_lock.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/reentrant_read_write_lock.rb new file mode 100644 index 0000000000..42d7f3c3ec --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/reentrant_read_write_lock.rb @@ -0,0 +1,379 @@ +require 'thread' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/errors' +require 'concurrent/synchronization' +require 'concurrent/atomic/thread_local_var' + +module Concurrent + + # Re-entrant 
read-write lock implementation + # + # Allows any number of concurrent readers, but only one concurrent writer + # (And while the "write" lock is taken, no read locks can be obtained either. + # Hence, the write lock can also be called an "exclusive" lock.) + # + # If another thread has taken a read lock, any thread which wants a write lock + # will block until all the readers release their locks. However, once a thread + # starts waiting to obtain a write lock, any additional readers that come along + # will also wait (so writers are not starved). + # + # A thread can acquire both a read and write lock at the same time. A thread can + # also acquire a read lock OR a write lock more than once. Only when the read (or + # write) lock is released as many times as it was acquired, will the thread + # actually let it go, allowing other threads which might have been waiting + # to proceed. Therefore the lock can be upgraded by first acquiring + # read lock and then write lock and that the lock can be downgraded by first + # having both read and write lock a releasing just the write lock. + # + # If both read and write locks are acquired by the same thread, it is not strictly + # necessary to release them in the same order they were acquired. In other words, + # the following code is legal: + # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.acquire_write_lock + # lock.acquire_read_lock + # lock.release_write_lock + # # At this point, the current thread is holding only a read lock, not a write + # # lock. So other threads can take read locks, but not a write lock. + # lock.release_read_lock + # # Now the current thread is not holding either a read or write lock, so + # # another thread could potentially acquire a write lock. + # + # This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`. 
+ # + # @example + # lock = Concurrent::ReentrantReadWriteLock.new + # lock.with_read_lock { data.retrieve } + # lock.with_write_lock { data.modify! } + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock + class ReentrantReadWriteLock < Synchronization::Object + + # Implementation notes: + # + # A goal is to make the uncontended path for both readers/writers mutex-free + # Only if there is reader-writer or writer-writer contention, should mutexes be used + # Otherwise, a single CAS operation is all we need to acquire/release a lock + # + # Internal state is represented by a single integer ("counter"), and updated + # using atomic compare-and-swap operations + # When the counter is 0, the lock is free + # Each thread which has one OR MORE read locks increments the counter by 1 + # (and decrements by 1 when releasing the read lock) + # The counter is increased by (1 << 15) for each writer waiting to acquire the + # write lock, and by (1 << 29) if the write lock is taken + # + # Additionally, each thread uses a thread-local variable to count how many times + # it has acquired a read lock, AND how many times it has acquired a write lock. 
+ # It uses a similar trick; an increment of 1 means a read lock was taken, and + # an increment of (1 << 15) means a write lock was taken + # This is what makes re-entrancy possible + # + # 2 rules are followed to ensure good liveness properties: + # 1) Once a writer has queued up and is waiting for a write lock, no other thread + # can take a lock without waiting + # 2) When a write lock is released, readers are given the "first chance" to wake + # up and acquire a read lock + # Following these rules means readers and writers tend to "take turns", so neither + # can starve the other, even under heavy contention + + # @!visibility private + READER_BITS = 15 + # @!visibility private + WRITER_BITS = 14 + + # Used with @Counter: + # @!visibility private + WAITING_WRITER = 1 << READER_BITS + # @!visibility private + RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS) + # @!visibility private + MAX_READERS = WAITING_WRITER - 1 + # @!visibility private + MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 + + # Used with @HeldCount: + # @!visibility private + WRITE_LOCK_HELD = 1 << READER_BITS + # @!visibility private + READ_LOCK_MASK = WRITE_LOCK_HELD - 1 + # @!visibility private + WRITE_LOCK_MASK = MAX_WRITERS + + safe_initialization! + + # Create a new `ReentrantReadWriteLock` in the unlocked state. + def initialize + super() + @Counter = AtomicFixnum.new(0) # single integer which represents lock state + @ReadQueue = Synchronization::Lock.new # used to queue waiting readers + @WriteQueue = Synchronization::Lock.new # used to queue waiting writers + @HeldCount = ThreadLocalVar.new(0) # indicates # of R & W locks held by this thread + end + + # Execute a block operation within a read lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. 
+ def with_read_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_read_lock + begin + yield + ensure + release_read_lock + end + end + + # Execute a block operation within a write lock. + # + # @yield the task to be performed within the lock. + # + # @return [Object] the result of the block operation. + # + # @raise [ArgumentError] when no block is given. + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def with_write_lock + raise ArgumentError.new('no block given') unless block_given? + acquire_write_lock + begin + yield + ensure + release_write_lock + end + end + + # Acquire a read lock. If a write lock is held by another thread, will block + # until it is released. + # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of readers + # is exceeded. + def acquire_read_lock + if (held = @HeldCount.value) > 0 + # If we already have a lock, there's no need to wait + if held & READ_LOCK_MASK == 0 + # But we do need to update the counter, if we were holding a write + # lock but not a read lock + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) + + # If a writer is waiting OR running when we first queue up, we need to wait + if waiting_or_running_writer?(c) + # Before going to sleep, check again with the ReadQueue mutex held + @ReadQueue.synchronize do + @ReadQueue.ns_wait if waiting_or_running_writer? + end + # Note: the above 'synchronize' block could have used #wait_until, + # but that waits repeatedly in a loop, checking the wait condition + # each time it wakes up (to protect against spurious wakeups) + # But we are already in a loop, which is only broken when we successfully + # acquire the lock! 
So we don't care about spurious wakeups, and would + # rather not pay the extra overhead of using #wait_until + + # After a reader has waited once, they are allowed to "barge" ahead of waiting writers + # But if a writer is *running*, the reader still needs to wait (naturally) + while true + c = @Counter.value + if running_writer?(c) + @ReadQueue.synchronize do + @ReadQueue.ns_wait if running_writer? + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + elsif @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + end + + # Try to acquire a read lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_read_lock + if (held = @HeldCount.value) > 0 + if held & READ_LOCK_MASK == 0 + # If we hold a write lock, but not a read lock... + @Counter.update { |c| c + 1 } + end + @HeldCount.value = held + 1 + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1) + @HeldCount.value = held + 1 + return true + end + end + false + end + + # Release a previously acquired read lock. + # + # @return [Boolean] true if the lock is successfully released + def release_read_lock + held = @HeldCount.value = @HeldCount.value - 1 + rlocks_held = held & READ_LOCK_MASK + if rlocks_held == 0 + c = @Counter.update { |counter| counter - 1 } + # If one or more writers were waiting, and we were the last reader, wake a writer up + if waiting_or_running_writer?(c) && running_readers(c) == 0 + @WriteQueue.signal + end + elsif rlocks_held == READ_LOCK_MASK + raise IllegalOperationError, "Cannot release a read lock which is not held" + end + true + end + + # Acquire a write lock. Will block and wait for all active readers and writers. 
+ # + # @return [Boolean] true if the lock is successfully acquired + # + # @raise [Concurrent::ResourceLimitError] if the maximum number of writers + # is exceeded. + def acquire_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + # if we already have a write (exclusive) lock, there's no need to wait + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + + while true + c = @Counter.value + raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) + + # To go ahead and take the lock without waiting, there must be no writer + # running right now, AND no writers who came before us still waiting to + # acquire the lock + # Additionally, if any read locks have been taken, we must hold all of them + if c == held + # If we successfully swap the RUNNING_WRITER bit on, then we can go ahead + if @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + elsif @Counter.compare_and_set(c, c+WAITING_WRITER) + while true + # Now we have successfully incremented, so no more readers will be able to increment + # (they will wait instead) + # However, readers OR writers could decrement right here + @WriteQueue.synchronize do + # So we have to do another check inside the synchronized section + # If a writer OR another reader is running, then go to sleep + c = @Counter.value + @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held + end + # Note: if you are thinking of replacing the above 'synchronize' block + # with #wait_until, read the comment in #acquire_read_lock first! 
+ + # We just came out of a wait + # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, + # then we are OK to stop waiting and go ahead + # Otherwise go back and wait again + c = @Counter.value + if !running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + end + end + end + + # Try to acquire a write lock and return true if we succeed. If it cannot be + # acquired immediately, return false. + # + # @return [Boolean] true if the lock is successfully acquired + def try_write_lock + if (held = @HeldCount.value) >= WRITE_LOCK_HELD + @HeldCount.value = held + WRITE_LOCK_HELD + return true + else + c = @Counter.value + if !waiting_or_running_writer?(c) && + running_readers(c) == held && + @Counter.compare_and_set(c, c+RUNNING_WRITER) + @HeldCount.value = held + WRITE_LOCK_HELD + return true + end + end + false + end + + # Release a previously acquired write lock. 
+ # + # @return [Boolean] true if the lock is successfully released + def release_write_lock + held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD + wlocks_held = held & WRITE_LOCK_MASK + if wlocks_held == 0 + c = @Counter.update { |counter| counter - RUNNING_WRITER } + @ReadQueue.broadcast + @WriteQueue.signal if waiting_writers(c) > 0 + elsif wlocks_held == WRITE_LOCK_MASK + raise IllegalOperationError, "Cannot release a write lock which is not held" + end + true + end + + private + + # @!visibility private + def running_readers(c = @Counter.value) + c & MAX_READERS + end + + # @!visibility private + def running_readers?(c = @Counter.value) + (c & MAX_READERS) > 0 + end + + # @!visibility private + def running_writer?(c = @Counter.value) + c >= RUNNING_WRITER + end + + # @!visibility private + def waiting_writers(c = @Counter.value) + (c & MAX_WRITERS) >> READER_BITS + end + + # @!visibility private + def waiting_or_running_writer?(c = @Counter.value) + c >= WAITING_WRITER + end + + # @!visibility private + def max_readers?(c = @Counter.value) + (c & MAX_READERS) == MAX_READERS + end + + # @!visibility private + def max_writers?(c = @Counter.value) + (c & MAX_WRITERS) == MAX_WRITERS + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/ruby_thread_local_var.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/ruby_thread_local_var.rb new file mode 100644 index 0000000000..06afae7316 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/ruby_thread_local_var.rb @@ -0,0 +1,161 @@ +require 'thread' +require 'concurrent/atomic/abstract_thread_local_var' + +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class RubyThreadLocalVar < AbstractThreadLocalVar + + # Each thread has a (lazily initialized) array of thread-local 
variable values + # Each time a new thread-local var is created, we allocate an "index" for it + # For example, if the allocated index is 1, that means slot #1 in EVERY + # thread's thread-local array will be used for the value of that TLV + # + # The good thing about using a per-THREAD structure to hold values, rather + # than a per-TLV structure, is that no synchronization is needed when + # reading and writing those values (since the structure is only ever + # accessed by a single thread) + # + # Of course, when a TLV is GC'd, 1) we need to recover its index for use + # by other new TLVs (otherwise the thread-local arrays could get bigger + # and bigger with time), and 2) we need to null out all the references + # held in the now-unused slots (both to avoid blocking GC of those objects, + # and also to prevent "stale" values from being passed on to a new TLV + # when the index is reused) + # Because we need to null out freed slots, we need to keep references to + # ALL the thread-local arrays -- ARRAYS is for that + # But when a Thread is GC'd, we need to drop the reference to its thread-local + # array, so we don't leak memory + + # @!visibility private + FREE = [] + LOCK = Mutex.new + ARRAYS = {} # used as a hash set + @@next = 0 + private_constant :FREE, :LOCK, :ARRAYS + + # @!macro thread_local_var_method_get + def value + if array = get_threadlocal_array + value = array[@index] + if value.nil? + default + elsif value.equal?(NULL) + nil + else + value + end + else + default + end + end + + # @!macro thread_local_var_method_set + def value=(value) + me = Thread.current + # We could keep the thread-local arrays in a hash, keyed by Thread + # But why? 
That would require locking + # Using Ruby's built-in thread-local storage is faster + unless array = get_threadlocal_array(me) + array = set_threadlocal_array([], me) + LOCK.synchronize { ARRAYS[array.object_id] = array } + ObjectSpace.define_finalizer(me, self.class.thread_finalizer(array)) + end + array[@index] = (value.nil? ? NULL : value) + value + end + + protected + + # @!visibility private + def allocate_storage + @index = LOCK.synchronize do + FREE.pop || begin + result = @@next + @@next += 1 + result + end + end + ObjectSpace.define_finalizer(self, self.class.threadlocal_finalizer(@index)) + end + + # @!visibility private + def self.threadlocal_finalizer(index) + proc do + Thread.new do # avoid error: can't be called from trap context + LOCK.synchronize do + FREE.push(index) + # The cost of GC'ing a TLV is linear in the number of threads using TLVs + # But that is natural! More threads means more storage is used per TLV + # So naturally more CPU time is required to free more storage + ARRAYS.each_value do |array| + array[index] = nil + end + end + end + end + end + + # @!visibility private + def self.thread_finalizer(array) + proc do + Thread.new do # avoid error: can't be called from trap context + LOCK.synchronize do + # The thread which used this thread-local array is now gone + # So don't hold onto a reference to the array (thus blocking GC) + ARRAYS.delete(array.object_id) + end + end + end + end + + private + + if Thread.instance_methods.include?(:thread_variable_get) + + def get_threadlocal_array(thread = Thread.current) + thread.thread_variable_get(:__threadlocal_array__) + end + + def set_threadlocal_array(array, thread = Thread.current) + thread.thread_variable_set(:__threadlocal_array__, array) + end + + else + + def get_threadlocal_array(thread = Thread.current) + thread[:__threadlocal_array__] + end + + def set_threadlocal_array(array, thread = Thread.current) + thread[:__threadlocal_array__] = array + end + end + + # This exists only for use 
in testing + # @!visibility private + def value_for(thread) + if array = get_threadlocal_array(thread) + value = array[@index] + if value.nil? + default_for(thread) + elsif value.equal?(NULL) + nil + else + value + end + else + default_for(thread) + end + end + + def default_for(thread) + if @default_block + raise "Cannot use default_for with default block" + else + @default + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/semaphore.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/semaphore.rb new file mode 100644 index 0000000000..1b2bd8c95d --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/semaphore.rb @@ -0,0 +1,145 @@ +require 'concurrent/atomic/mutex_semaphore' +require 'concurrent/synchronization' + +module Concurrent + + ################################################################### + + # @!macro semaphore_method_initialize + # + # Create a new `Semaphore` with the initial `count`. + # + # @param [Fixnum] count the initial count + # + # @raise [ArgumentError] if `count` is not an integer or is less than zero + + # @!macro semaphore_method_acquire + # + # Acquires the given number of permits from this semaphore, + # blocking until all are available. + # + # @param [Fixnum] permits Number of permits to acquire + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [nil] + + # @!macro semaphore_method_available_permits + # + # Returns the current number of permits available in this semaphore. + # + # @return [Integer] + + # @!macro semaphore_method_drain_permits + # + # Acquires and returns all permits that are immediately available. 
+ # + # @return [Integer] + + # @!macro semaphore_method_try_acquire + # + # Acquires the given number of permits from this semaphore, + # only if all are available at the time of invocation or within + # `timeout` interval + # + # @param [Fixnum] permits the number of permits to acquire + # + # @param [Fixnum] timeout the number of seconds to wait for the counter + # or `nil` to return immediately + # + # @raise [ArgumentError] if `permits` is not an integer or is less than + # one + # + # @return [Boolean] `false` if no permits are available, `true` when + # acquired a permit + + # @!macro semaphore_method_release + # + # Releases the given number of permits, returning them to the semaphore. + # + # @param [Fixnum] permits Number of permits to return to the semaphore. + # + # @raise [ArgumentError] if `permits` is not a number or is less than one + # + # @return [nil] + + ################################################################### + + # @!macro semaphore_public_api + # + # @!method initialize(count) + # @!macro semaphore_method_initialize + # + # @!method acquire(permits = 1) + # @!macro semaphore_method_acquire + # + # @!method available_permits + # @!macro semaphore_method_available_permits + # + # @!method drain_permits + # @!macro semaphore_method_drain_permits + # + # @!method try_acquire(permits = 1, timeout = nil) + # @!macro semaphore_method_try_acquire + # + # @!method release(permits = 1) + # @!macro semaphore_method_release + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + SemaphoreImplementation = case + when defined?(JavaSemaphore) + JavaSemaphore + else + MutexSemaphore + end + private_constant :SemaphoreImplementation + + # @!macro semaphore + # + # A counting semaphore. Conceptually, a semaphore maintains a set of + # permits. Each {#acquire} blocks if necessary until a permit is + # available, and then takes it. 
Each {#release} adds a permit, potentially + # releasing a blocking acquirer. + # However, no actual permit objects are used; the Semaphore just keeps a + # count of the number available and acts accordingly. + # + # @!macro semaphore_public_api + # @example + # semaphore = Concurrent::Semaphore.new(2) + # + # t1 = Thread.new do + # semaphore.acquire + # puts "Thread 1 acquired semaphore" + # end + # + # t2 = Thread.new do + # semaphore.acquire + # puts "Thread 2 acquired semaphore" + # end + # + # t3 = Thread.new do + # semaphore.acquire + # puts "Thread 3 acquired semaphore" + # end + # + # t4 = Thread.new do + # sleep(2) + # puts "Thread 4 releasing semaphore" + # semaphore.release + # end + # + # [t1, t2, t3, t4].each(&:join) + # + # # prints: + # # Thread 3 acquired semaphore + # # Thread 2 acquired semaphore + # # Thread 4 releasing semaphore + # # Thread 1 acquired semaphore + # + class Semaphore < SemaphoreImplementation + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/thread_local_var.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/thread_local_var.rb new file mode 100644 index 0000000000..9f09e4cd0b --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic/thread_local_var.rb @@ -0,0 +1,104 @@ +require 'concurrent/atomic/ruby_thread_local_var' +require 'concurrent/atomic/java_thread_local_var' +require 'concurrent/utility/engine' + +module Concurrent + + ################################################################### + + # @!macro thread_local_var_method_initialize + # + # Creates a thread local variable. 
+ # + # @param [Object] default the default value when otherwise unset + # @param [Proc] default_block Optional block that gets called to obtain the + # default value for each thread + + # @!macro thread_local_var_method_get + # + # Returns the value in the current thread's copy of this thread-local variable. + # + # @return [Object] the current value + + # @!macro thread_local_var_method_set + # + # Sets the current thread's copy of this thread-local variable to the specified value. + # + # @param [Object] value the value to set + # @return [Object] the new value + + # @!macro thread_local_var_method_bind + # + # Bind the given value to thread local storage during + # execution of the given block. + # + # @param [Object] value the value to bind + # @yield the operation to be performed with the bound variable + # @return [Object] the value + + + ################################################################### + + # @!macro thread_local_var_public_api + # + # @!method initialize(default = nil, &default_block) + # @!macro thread_local_var_method_initialize + # + # @!method value + # @!macro thread_local_var_method_get + # + # @!method value=(value) + # @!macro thread_local_var_method_set + # + # @!method bind(value, &block) + # @!macro thread_local_var_method_bind + + ################################################################### + + # @!visibility private + # @!macro internal_implementation_note + ThreadLocalVarImplementation = case + when Concurrent.on_jruby? + JavaThreadLocalVar + else + RubyThreadLocalVar + end + private_constant :ThreadLocalVarImplementation + + # @!macro thread_local_var + # + # A `ThreadLocalVar` is a variable where the value is different for each thread. + # Each variable may have a default value, but when you modify the variable only + # the current thread will ever see that change. 
+ # + # @!macro thread_safe_variable_comparison + # + # @example + # v = ThreadLocalVar.new(14) + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # + # @example + # v = ThreadLocalVar.new(14) + # + # t1 = Thread.new do + # v.value #=> 14 + # v.value = 1 + # v.value #=> 1 + # end + # + # t2 = Thread.new do + # v.value #=> 14 + # v.value = 2 + # v.value #=> 2 + # end + # + # v.value #=> 14 + # + # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal + # + # @!macro thread_local_var_public_api + class ThreadLocalVar < ThreadLocalVarImplementation + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/mutex_atomic.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/mutex_atomic.rb new file mode 100644 index 0000000000..d092aedd5b --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/mutex_atomic.rb @@ -0,0 +1,56 @@ +module Concurrent + + # @!visibility private + # @!macro internal_implementation_note + class MutexAtomicReference < Synchronization::LockableObject + include AtomicDirectUpdate + include AtomicNumericCompareAndSetWrapper + alias_method :compare_and_swap, :compare_and_set + + # @!macro atomic_reference_method_initialize + def initialize(value = nil) + super() + synchronize { ns_initialize(value) } + end + + # @!macro atomic_reference_method_get + def get + synchronize { @value } + end + alias_method :value, :get + + # @!macro atomic_reference_method_set + def set(new_value) + synchronize { @value = new_value } + end + alias_method :value=, :set + + # @!macro atomic_reference_method_get_and_set + def get_and_set(new_value) + synchronize do + old_value = @value + @value = new_value + old_value + end + end + alias_method :swap, :get_and_set + + # @!macro atomic_reference_method_compare_and_set + 
def _compare_and_set(old_value, new_value) + synchronize do + if @value.equal? old_value + @value = new_value + true + else + false + end + end + end + + protected + + def ns_initialize(value) + @value = value + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb new file mode 100644 index 0000000000..709a382231 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb @@ -0,0 +1,28 @@ +module Concurrent + + # Special "compare and set" handling of numeric values. + # + # @!visibility private + # @!macro internal_implementation_note + module AtomicNumericCompareAndSetWrapper + + # @!macro atomic_reference_method_compare_and_set + def compare_and_set(old_value, new_value) + if old_value.kind_of? Numeric + while true + old = get + + return false unless old.kind_of? 
Numeric + + return false unless old == old_value + + result = _compare_and_set(old, new_value) + return result if result + end + else + _compare_and_set(old_value, new_value) + end + end + + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomics.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomics.rb new file mode 100644 index 0000000000..16cbe66101 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/atomics.rb @@ -0,0 +1,10 @@ +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/atomic_boolean' +require 'concurrent/atomic/atomic_fixnum' +require 'concurrent/atomic/cyclic_barrier' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/atomic/event' +require 'concurrent/atomic/read_write_lock' +require 'concurrent/atomic/reentrant_read_write_lock' +require 'concurrent/atomic/semaphore' +require 'concurrent/atomic/thread_local_var' diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_notify_observer_set.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_notify_observer_set.rb new file mode 100644 index 0000000000..50d52a6237 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_notify_observer_set.rb @@ -0,0 +1,107 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-read approach: + # observers are added and removed from a thread safe collection; every time + # a notification is required the internal data structure is copied to + # prevent concurrency issues + # + # @api private + class CopyOnNotifyObserverSet < Synchronization::LockableObject + 
+ def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? + raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + @observers[observer] = func + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + @observers.delete(observer) + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + synchronize do + @observers.clear + self + end + end + + # @!macro observable_count_observers + def count_observers + synchronize { @observers.count } + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + observers = duplicate_observers + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. + # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + observers = duplicate_and_clear_observers + notify_to(observers, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def duplicate_and_clear_observers + synchronize do + observers = @observers.dup + @observers.clear + observers + end + end + + def duplicate_observers + synchronize { @observers.dup } + end + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? 
+ observer.send(function, *args) + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_write_observer_set.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_write_observer_set.rb new file mode 100644 index 0000000000..3f3f7cccd0 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/copy_on_write_observer_set.rb @@ -0,0 +1,111 @@ +require 'concurrent/synchronization' + +module Concurrent + module Collection + + # A thread safe observer set implemented using copy-on-write approach: + # every time an observer is added or removed the whole internal data structure is + # duplicated and replaced with a new one. + # + # @api private + class CopyOnWriteObserverSet < Synchronization::LockableObject + + def initialize + super() + synchronize { ns_initialize } + end + + # @!macro observable_add_observer + def add_observer(observer = nil, func = :update, &block) + if observer.nil? && block.nil? 
+ raise ArgumentError, 'should pass observer as a first argument or block' + elsif observer && block + raise ArgumentError.new('cannot provide both an observer and a block') + end + + if block + observer = block + func = :call + end + + synchronize do + new_observers = @observers.dup + new_observers[observer] = func + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observer + def delete_observer(observer) + synchronize do + new_observers = @observers.dup + new_observers.delete(observer) + @observers = new_observers + observer + end + end + + # @!macro observable_delete_observers + def delete_observers + self.observers = {} + self + end + + # @!macro observable_count_observers + def count_observers + observers.count + end + + # Notifies all registered observers with optional args + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_observers(*args, &block) + notify_to(observers, *args, &block) + self + end + + # Notifies all registered observers with optional args and deletes them. + # + # @param [Object] args arguments to be passed to each observer + # @return [CopyOnWriteObserverSet] self + def notify_and_delete_observers(*args, &block) + old = clear_observers_and_return_old + notify_to(old, *args, &block) + self + end + + protected + + def ns_initialize + @observers = {} + end + + private + + def notify_to(observers, *args) + raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? + observers.each do |observer, function| + args = yield if block_given? 
+ observer.send(function, *args) + end + end + + def observers + synchronize { @observers } + end + + def observers=(new_set) + synchronize { @observers = new_set } + end + + def clear_observers_and_return_old + synchronize do + old_observers = @observers + @observers = {} + old_observers + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/java_non_concurrent_priority_queue.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/java_non_concurrent_priority_queue.rb new file mode 100644 index 0000000000..2be9e4373a --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/java_non_concurrent_priority_queue.rb @@ -0,0 +1,84 @@ +if Concurrent.on_jruby? + + module Concurrent + module Collection + + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class JavaNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + if [:min, :low].include?(order) + @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity + else + @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder()) + end + end + + # @!macro priority_queue_method_clear + def clear + @queue.clear + true + end + + # @!macro priority_queue_method_delete + def delete(item) + found = false + while @queue.remove(item) do + found = true + end + found + end + + # @!macro priority_queue_method_empty + def empty? + @queue.size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.contains(item) + end + alias_method :has_priority?, :include? 
+ + # @!macro priority_queue_method_length + def length + @queue.size + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + @queue.peek + end + + # @!macro priority_queue_method_pop + def pop + @queue.poll + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @queue.add(item) + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/lock_free_stack.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/lock_free_stack.rb new file mode 100644 index 0000000000..9996f78495 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/lock_free_stack.rb @@ -0,0 +1,158 @@ +module Concurrent + + # @!macro warn.edge + class LockFreeStack < Synchronization::Object + + safe_initialization! + + class Node + # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? 
+ + # @return [Node] + attr_reader :next_node + + # @return [Object] + attr_reader :value + + # @!visibility private + # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised + attr_writer :value + + def initialize(value, next_node) + @value = value + @next_node = next_node + end + + singleton_class.send :alias_method, :[], :new + end + + # The singleton for empty node + EMPTY = Node[nil, nil] + def EMPTY.next_node + self + end + + attr_atomic(:head) + private :head, :head=, :swap_head, :compare_and_set_head, :update_head + + # @!visibility private + def self.of1(value) + new Node[value, EMPTY] + end + + # @!visibility private + def self.of2(value1, value2) + new Node[value1, Node[value2, EMPTY]] + end + + # @param [Node] head + def initialize(head = EMPTY) + super() + self.head = head + end + + # @param [Node] head + # @return [true, false] + def empty?(head = self.head) + head.equal? EMPTY + end + + # @param [Node] head + # @param [Object] value + # @return [true, false] + def compare_and_push(head, value) + compare_and_set_head head, Node[value, head] + end + + # @param [Object] value + # @return [self] + def push(value) + while true + current_head = head + return self if compare_and_set_head current_head, Node[value, current_head] + end + end + + # @return [Node] + def peek + head + end + + # @param [Node] head + # @return [true, false] + def compare_and_pop(head) + compare_and_set_head head, head.next_node + end + + # @return [Object] + def pop + while true + current_head = head + return current_head.value if compare_and_set_head current_head, current_head.next_node + end + end + + # @param [Node] head + # @return [true, false] + def compare_and_clear(head) + compare_and_set_head head, EMPTY + end + + include Enumerable + + # @param [Node] head + # @return [self] + def each(head = nil) + return to_enum(:each, head) unless block_given? 
+ it = head || peek + until it.equal?(EMPTY) + yield it.value + it = it.next_node + end + self + end + + # @return [true, false] + def clear + while true + current_head = head + return false if current_head == EMPTY + return true if compare_and_set_head current_head, EMPTY + end + end + + # @param [Node] head + # @return [true, false] + def clear_if(head) + compare_and_set_head head, EMPTY + end + + # @param [Node] head + # @param [Node] new_head + # @return [true, false] + def replace_if(head, new_head) + compare_and_set_head head, new_head + end + + # @return [self] + # @yield over the cleared stack + # @yieldparam [Object] value + def clear_each(&block) + while true + current_head = head + return self if current_head == EMPTY + if compare_and_set_head current_head, EMPTY + each current_head, &block + return self + end + end + end + + # @return [String] Short string representation. + def to_s + format '%s %s>', super[0..-2], to_a.to_s + end + + alias_method :inspect, :to_s + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/atomic_reference_map_backend.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/atomic_reference_map_backend.rb new file mode 100644 index 0000000000..dc5189389d --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/atomic_reference_map_backend.rb @@ -0,0 +1,927 @@ +require 'concurrent/constants' +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/adder' +require 'concurrent/thread_safe/util/cheap_lockable' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module Collection + + # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class 
version 1.59 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 + # + # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose + # size exceeds a threshold). + # + # A hash table supporting full concurrency of retrievals and high expected + # concurrency for updates. However, even though all operations are + # thread-safe, retrieval operations do _not_ entail locking, and there is + # _not_ any support for locking the entire table in a way that prevents all + # access. + # + # Retrieval operations generally do not block, so may overlap with update + # operations. Retrievals reflect the results of the most recently _completed_ + # update operations holding upon their onset. (More formally, an update + # operation for a given key bears a _happens-before_ relation with any (non + # +nil+) retrieval for that key reporting the updated value.) For aggregate + # operations such as +clear()+, concurrent retrievals may reflect insertion or + # removal of only some entries. Similarly, the +each_pair+ iterator yields + # elements reflecting the state of the hash table at some point at or since + # the start of the +each_pair+. Bear in mind that the results of aggregate + # status methods including +size()+ and +empty?+} are typically useful only + # when a map is not undergoing concurrent updates in other threads. Otherwise + # the results of these methods reflect transient states that may be adequate + # for monitoring or estimation purposes, but not for program control. + # + # The table is dynamically expanded when there are too many collisions (i.e., + # keys that have distinct hash codes but fall into the same slot modulo the + # table size), with the expected average effect of maintaining roughly two + # bins per mapping (corresponding to a 0.75 load factor threshold for + # resizing). 
There may be much variance around this average as mappings are + # added and removed, but overall, this maintains a commonly accepted + # time/space tradeoff for hash tables. However, resizing this or any other + # kind of hash table may be a relatively slow operation. When possible, it is + # a good idea to provide a size estimate as an optional :initial_capacity + # initializer argument. An additional optional :load_factor constructor + # argument provides a further means of customizing initial table capacity by + # specifying the table density to be used in calculating the amount of space + # to allocate for the given number of elements. Note that using many keys with + # exactly the same +hash+ is a sure way to slow down performance of any hash + # table. + # + # ## Design overview + # + # The primary design goal of this hash table is to maintain concurrent + # readability (typically method +[]+, but also iteration and related methods) + # while minimizing update contention. Secondary goals are to keep space + # consumption about the same or better than plain +Hash+, and to support high + # initial insertion rates on an empty table by many threads. + # + # Each key-value mapping is held in a +Node+. The validation-based approach + # explained below leads to a lot of code sprawl because retry-control + # precludes factoring into smaller methods. + # + # The table is lazily initialized to a power-of-two size upon the first + # insertion. Each bin in the table normally contains a list of +Node+s (most + # often, the list has only zero or one +Node+). Table accesses require + # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are + # always accurately traversable under volatile reads, so long as lookups check + # hash code and non-nullness of value before checking key equality. + # + # We use the top two bits of +Node+ hash fields for control purposes -- they + # are available anyway because of addressing constraints. 
As explained further + # below, these top bits are used as follows: + # + # - 00 - Normal + # - 01 - Locked + # - 11 - Locked and may have a thread waiting for lock + # - 10 - +Node+ is a forwarding node + # + # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, + # except for forwarding nodes, for which the lower bits are zero (and so + # always have hash field == +MOVED+). + # + # Insertion (via +[]=+ or its variants) of the first node in an empty bin is + # performed by just CASing it to the bin. This is by far the most common case + # for put operations under most key/hash distributions. Other update + # operations (insert, delete, and replace) require locks. We do not want to + # waste the space required to associate a distinct lock object with each bin, + # so instead use the first node of a bin list itself as a lock. Blocking + # support for these locks relies +Concurrent::ThreadSafe::Util::CheapLockable. However, we also need a + # +try_lock+ construction, so we overlay these by using bits of the +Node+ + # hash field for lock control (see above), and so normally use builtin + # monitors only for blocking and signalling using + # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. + # + # Using the first node of a list as a lock does not by itself suffice though: + # When a node is locked, any update must first validate that it is still the + # first node after locking it, and retry if not. Because new nodes are always + # appended to lists, once a node is first in a bin, it remains first until + # deleted or the bin becomes invalidated (upon resizing). However, operations + # that only conditionally update may inspect nodes until the point of update. + # This is a converse of sorts to the lazy locking technique described by + # Herlihy & Shavit. 
+ # + # The main disadvantage of per-bin locks is that other update operations on + # other nodes in a bin list protected by the same lock can stall, for example + # when user +eql?+ or mapping functions take a long time. However, + # statistically, under random hash codes, this is not a common problem. + # Ideally, the frequency of nodes in bins follows a Poisson distribution + # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of + # about 0.5 on average, given the resizing threshold of 0.75, although with a + # large variance because of resizing granularity. Ignoring variance, the + # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / + # factorial(k)). The first values are: + # + # - 0: 0.60653066 + # - 1: 0.30326533 + # - 2: 0.07581633 + # - 3: 0.01263606 + # - 4: 0.00157952 + # - 5: 0.00015795 + # - 6: 0.00001316 + # - 7: 0.00000094 + # - 8: 0.00000006 + # - more: less than 1 in ten million + # + # Lock contention probability for two threads accessing distinct elements is + # roughly 1 / (8 * #elements) under random hashes. + # + # The table is resized when occupancy exceeds a percentage threshold + # (nominally, 0.75, but see below). Only a single thread performs the resize + # (using field +size_control+, to arrange exclusion), but the table otherwise + # remains usable for reads and updates. Resizing proceeds by transferring + # bins, one by one, from the table to the next table. Because we are using + # power-of-two expansion, the elements from each bin must either stay at same + # index, or move with a power of two offset. We eliminate unnecessary node + # creation by catching cases where old nodes can be reused because their next + # fields won't change. On average, only about one-sixth of them need cloning + # when a table doubles. The nodes they replace will be garbage collectable as + # soon as they are no longer referenced by any reader thread that may be in + # the midst of concurrently traversing table. 
Upon transfer, the old table bin + # contains only a special forwarding node (with hash field +MOVED+) that + # contains the next table as its key. On encountering a forwarding node, + # access and update operations restart, using the new table. + # + # Each bin transfer requires its bin lock. However, unlike other cases, a + # transfer can skip a bin if it fails to acquire its lock, and revisit it + # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that + # have been skipped because of failure to acquire a lock, and blocks only if + # none are available (i.e., only very rarely). The transfer operation must + # also ensure that all accessible bins in both the old and new table are + # usable by any traversal. When there are no lock acquisition failures, this + # is arranged simply by proceeding from the last bin (+table.size - 1+) up + # towards the first. Upon seeing a forwarding node, traversals arrange to move + # to the new table without revisiting nodes. However, when any node is skipped + # during a transfer, all earlier table bins may have become visible, so are + # initialized with a reverse-forwarding node back to the old table until the + # new ones are established. (This sometimes requires transiently locking a + # forwarding node, which is possible under the above encoding.) These more + # expensive mechanics trigger only when necessary. + # + # The traversal scheme also applies to partial traversals of + # ranges of bins (via an alternate Traverser constructor) + # to support partitioned aggregate operations. Also, read-only + # operations give up if ever forwarded to a null table, which + # provides support for shutdown-style clearing, which is also not + # currently implemented. + # + # Lazy table initialization minimizes footprint until first use. 
+ # + # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+, + # which avoids contention on updates but can encounter cache thrashing + # if read too frequently during concurrent access. To avoid reading so + # often, resizing is attempted either when a bin lock is + # contended, or upon adding to a bin already holding two or more + # nodes (checked before adding in the +x_if_absent+ methods, after + # adding in others). Under uniform hash distributions, the + # probability of this occurring at threshold is around 13%, + # meaning that only about 1 in 8 puts check threshold (and after + # resizing, many fewer do so). But this approximation has high + # variance for small table sizes, so we check on any collision + # for sizes <= 64. The bulk putAll operation further reduces + # contention by only committing count updates upon these size + # checks. + # + # @!visibility private + class AtomicReferenceMapBackend + + # @!visibility private + class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple + def cas_new_node(i, hash, key, value) + cas(i, nil, Node.new(hash, key, value)) + end + + def try_to_cas_in_computed(i, hash, key) + succeeded = false + new_value = nil + new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) + if cas(i, nil, new_node) + begin + if NULL == (new_value = yield(NULL)) + was_null = true + else + new_node.value = new_value + end + succeeded = true + ensure + volatile_set(i, nil) if !succeeded || was_null + new_node.unlock_via_hash(locked_hash, hash) + end + end + return succeeded, new_value + end + + def try_lock_via_hash(i, node, node_hash) + node.try_lock_via_hash(node_hash) do + yield if volatile_get(i) == node + end + end + + def delete_node_at(i, node, predecessor_node) + if predecessor_node + predecessor_node.next = node.next + else + volatile_set(i, node.next) + end + end + end + + # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do + # not contain user keys or values. 
Otherwise, keys are never +nil+, and + # +NULL+ +value+ fields indicate that a node is in the process of being + # deleted or created. For purposes of read-only access, a key may be read + # before a value, but can only be used after checking value to be +!= NULL+. + # + # @!visibility private + class Node + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :hash, :value, :next + + include Concurrent::ThreadSafe::Util::CheapLockable + + bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves + # Encodings for special uses of Node hash fields. See above for explanation. + MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes + LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit + WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together + HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash + + SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0 + + attr_reader :key + + def initialize(hash, key, value, next_node = nil) + super() + @key = key + self.lazy_set_hash(hash) + self.lazy_set_value(value) + self.next = next_node + end + + # Spins a while if +LOCKED+ bit set and this node is the first of its bin, + # and then sets +WAITING+ bits on hash field and blocks (once) if they are + # still set. It is OK for this method to return even if lock is not + # available upon exit, which enables these simple single-wait mechanics. + # + # The corresponding signalling operation is performed within callers: Upon + # detecting that +WAITING+ has been set when unlocking lock (via a failed + # CAS from non-waiting +LOCKED+ state), unlockers acquire the + # +cheap_synchronize+ lock and perform a +cheap_broadcast+. + def try_await_lock(table, i) + if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? 
+ spins = SPIN_LOCK_ATTEMPTS + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get + while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) + if spins >= 0 + if (randomizer = (randomizer >> 1)).even? # spin at random + if (spins -= 1) == 0 + Thread.pass # yield before blocking + else + randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? + end + end + elsif cas_hash(my_hash, my_hash | WAITING) + force_acquire_lock(table, i) + break + end + end + end + end + + def key?(key) + @key.eql?(key) + end + + def matches?(key, hash) + pure_hash == hash && key?(key) + end + + def pure_hash + hash & HASH_BITS + end + + def try_lock_via_hash(node_hash = hash) + if cas_hash(node_hash, locked_hash = node_hash | LOCKED) + begin + yield + ensure + unlock_via_hash(locked_hash, node_hash) + end + end + end + + def locked? + self.class.locked_hash?(hash) + end + + def unlock_via_hash(locked_hash, node_hash) + unless cas_hash(locked_hash, node_hash) + self.hash = node_hash + cheap_synchronize { cheap_broadcast } + end + end + + private + def force_acquire_lock(table, i) + cheap_synchronize do + if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING + cheap_wait + else + cheap_broadcast # possibly won race vs signaller + end + end + end + + class << self + def locked_hash?(hash) + (hash & LOCKED) != 0 + end + end + end + + # shorthands + MOVED = Node::MOVED + LOCKED = Node::LOCKED + WAITING = Node::WAITING + HASH_BITS = Node::HASH_BITS + + NOW_RESIZING = -1 + DEFAULT_CAPACITY = 16 + MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT + + # The buffer size for skipped bins during transfers. The + # value is arbitrary but should be large enough to avoid + # most locking stalls during resizes. + TRANSFER_BUFFER_SIZE = 32 + + extend Concurrent::ThreadSafe::Util::Volatile + attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. 
Size is always a power of two. + + # Table initialization and resizing control. When negative, the + # table is being initialized or resized. Otherwise, when table is + # null, holds the initial table size to use upon creation, or 0 + # for default. After initialization, holds the next element count + # value upon which to resize the table. + :size_control + + def initialize(options = nil) + super() + @counter = Concurrent::ThreadSafe::Util::Adder.new + initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY + self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? MAX_CAPACITY : capacity + end + + def get_or_default(key, else_value = nil) + hash = key_hash(key) + current_table = table + while current_table + node = current_table.volatile_get_by_hash(hash) + current_table = + while node + if (node_hash = node.hash) == MOVED + break node.key + elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) + return value + end + node = node.next + end + end + else_value + end + + def [](key) + get_or_default(key) + end + + def key?(key) + get_or_default(key, NULL) != NULL + end + + def []=(key, value) + get_and_set(key, value) + value + end + + def compute_if_absent(key) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } + if succeeded + increment_size + return new_value + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) + return current_value + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } + return value if succeeded + end + end + end + 
+ def compute_if_present(key) + new_value = nil + internal_replace(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + new_value + end + + def compute(key) + internal_compute(key) do |old_value| + if (new_value = yield(NULL == old_value ? nil : old_value)).nil? + NULL + else + new_value + end + end + end + + def merge_pair(key, value) + internal_compute(key) do |old_value| + if NULL == old_value || !(value = yield(old_value)).nil? + value + else + NULL + end + end + end + + def replace_pair(key, old_value, new_value) + NULL != internal_replace(key, old_value) { new_value } + end + + def replace_if_exists(key, new_value) + if (result = internal_replace(key) { new_value }) && NULL != result + result + end + end + + def get_and_set(key, value) # internalPut in the original CHMV8 + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + if current_table.cas_new_node(i, hash, key, value) + increment_size + break + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + break old_value if succeeded + end + end + end + + def delete(key) + replace_if_exists(key, NULL) + end + + def delete_pair(key, value) + result = internal_replace(key, value) { NULL } + if result && NULL != result + !!result + else + false + end + end + + def each_pair + return self unless current_table = table + current_table_size = base_size = current_table.size + i = base_index = 0 + while base_index < base_size + if node = current_table.volatile_get(i) + if node.hash == MOVED + current_table = node.key + current_table_size = current_table.size + else + begin + if NULL != (value = node.value) # skip deleted or special nodes + 
yield node.key, value + end + end while node = node.next + end + end + + if (i_with_base = i + base_size) < current_table_size + i = i_with_base # visit upper slots if present + else + i = base_index += 1 + end + end + self + end + + def size + (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values + end + + def empty? + size == 0 + end + + # Implementation for clear. Steps through each bin, removing all nodes. + def clear + return self unless current_table = table + current_table_size = current_table.size + deleted_count = i = 0 + while i < current_table_size + if !(node = current_table.volatile_get(i)) + i += 1 + elsif (node_hash = node.hash) == MOVED + current_table = node.key + current_table_size = current_table.size + elsif Node.locked_hash?(node_hash) + decrement_size(deleted_count) # opportunistically update count + deleted_count = 0 + node.try_await_lock(current_table, i) + else + current_table.try_lock_via_hash(i, node, node_hash) do + begin + deleted_count += 1 if NULL != node.value # recheck under lock + node.value = nil + end while node = node.next + current_table.volatile_set(i, nil) + i += 1 + end + end + end + decrement_size(deleted_count) + self + end + + private + # Internal versions of the insertion methods, each a + # little more complicated than the last. All have + # the same basic structure: + # 1. If table uninitialized, create + # 2. If bin empty, try to CAS new node + # 3. If bin stale, use new table + # 4. Lock and validate; if valid, scan and add or update + # + # The others interweave other checks and/or alternative actions: + # * Plain +get_and_set+ checks for and performs resize after insertion. + # * compute_if_absent prescans for mapping without lock (and fails to add + # if present), which also makes pre-emptive resize checks worthwhile. + # + # Someday when details settle down a bit more, it might be worth + # some factoring to reduce sprawl. 
+ def internal_replace(key, expected_old_value = NULL, &block) + hash = key_hash(key) + current_table = table + while current_table + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + break + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif (node_hash & HASH_BITS) != hash && !node.next # precheck + break # rules out possible existence + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) + return old_value if succeeded + end + end + NULL + end + + def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + old_value = NULL + begin + if node.matches?(key, hash) && NULL != (current_value = node.value) + if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value + old_value = current_value + if NULL == (node.value = yield(old_value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + end + end + break + end + + predecessor_node = node + end while node = node.next + + return true, old_value + end + end + + def find_value_in_node_list(node, key, hash, pure_hash) + do_check_for_resize = false + while true + if pure_hash == hash && node.key?(key) && NULL != (value = node.value) + return value + elsif node = node.next + do_check_for_resize = true # at least 2 nodes -> check for resize + pure_hash = node.pure_hash + else + return NULL + end + end + ensure + check_for_resize if do_check_for_resize + end + + def internal_compute(key, &block) + hash = key_hash(key) + current_table = table || initialize_table + while true + if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) + succeeded, new_value = current_table.try_to_cas_in_computed(i, 
hash, key, &block) + if succeeded + if NULL == new_value + break nil + else + increment_size + break new_value + end + end + elsif (node_hash = node.hash) == MOVED + current_table = node.key + elsif Node.locked_hash?(node_hash) + try_await_lock(current_table, i, node) + else + succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) + break new_value if succeeded + end + end + end + + def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + while true + if node.matches?(key, hash) && NULL != (value = node.value) + return true, value + end + last = node + unless node = node.next + last.next = Node.new(hash, key, value = yield) + added = true + increment_size + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_compute(key, hash, current_table, i, node, node_hash) + added = false + current_table.try_lock_via_hash(i, node, node_hash) do + predecessor_node = nil + while true + if node.matches?(key, hash) && NULL != (value = node.value) + if NULL == (node.value = value = yield(value)) + current_table.delete_node_at(i, node, predecessor_node) + decrement_size + value = nil + end + return true, value + end + predecessor_node = node + unless node = node.next + if NULL == (value = yield(NULL)) + value = nil + else + predecessor_node.next = Node.new(hash, key, value) + added = true + increment_size + end + return true, value + end + end + end + ensure + check_for_resize if added + end + + def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) + node_nesting = nil + current_table.try_lock_via_hash(i, node, node_hash) do + node_nesting = 1 + old_value = nil + found_old_value = false + while node + if node.matches?(key, hash) && NULL != (old_value = node.value) + found_old_value = true + node.value = value + break + end + last = node + unless node = node.next + last.next = 
Node.new(hash, key, value) + break + end + node_nesting += 1 + end + + return true, old_value if found_old_value + increment_size + true + end + ensure + check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) + end + + def initialize_copy(other) + super + @counter = Concurrent::ThreadSafe::Util::Adder.new + self.table = nil + self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY + self + end + + def try_await_lock(current_table, i, node) + check_for_resize # try resizing if can't get lock + node.try_await_lock(current_table, i) + end + + def key_hash(key) + key.hash & HASH_BITS + end + + # Returns a power of two table size for the given desired capacity. + def table_size_for(entry_count) + size = 2 + size <<= 1 while size < entry_count + size + end + + # Initializes table, using the size recorded in +size_control+. + def initialize_table + until current_table ||= table + if (size_ctrl = size_control) == NOW_RESIZING + Thread.pass # lost initialization race; just spin + else + try_in_resize_lock(current_table, size_ctrl) do + initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY + current_table = self.table = Table.new(initial_size) + initial_size - (initial_size >> 2) # 75% load factor + end + end + end + current_table + end + + # If table is too small and not already resizing, creates next table and + # transfers bins. Rechecks occupancy after a transfer to see if another + # resize is already needed because resizings are lagging additions. 
+ def check_for_resize + while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum + try_in_resize_lock(current_table, size_ctrl) do + self.table = rebuild(current_table) + (table_size << 1) - (table_size >> 1) # 75% load factor + end + end + end + + def try_in_resize_lock(current_table, size_ctrl) + if cas_size_control(size_ctrl, NOW_RESIZING) + begin + if current_table == table # recheck under lock + size_ctrl = yield # get new size_control + end + ensure + self.size_control = size_ctrl + end + end + end + + # Moves and/or copies the nodes in each bin to new table. See above for explanation. + def rebuild(table) + old_table_size = table.size + new_table = table.next_in_size_table + # puts "#{old_table_size} -> #{new_table.size}" + forwarder = Node.new(MOVED, new_table, NULL) + rev_forwarder = nil + locked_indexes = nil # holds bins to revisit; nil until needed + locked_arr_idx = 0 + bin = old_table_size - 1 + i = bin + while true + if !(node = table.volatile_get(i)) + # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table + redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) + elsif Node.locked_hash?(node_hash = node.hash) + locked_indexes ||= ::Array.new + if bin < 0 && locked_arr_idx > 0 + locked_arr_idx -= 1 + i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin + redo + end + if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE + node.try_await_lock(table, i) # no other options -- block + redo + end + rev_forwarder ||= Node.new(MOVED, table, NULL) + redo unless table.volatile_get(i) == node && node.locked? 
# recheck before adding to list + locked_indexes << i + new_table.volatile_set(i, rev_forwarder) + new_table.volatile_set(i + old_table_size, rev_forwarder) + else + redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) + end + + if bin > 0 + i = (bin -= 1) + elsif locked_indexes && !locked_indexes.empty? + bin = -1 + i = locked_indexes.pop + locked_arr_idx = locked_indexes.size - 1 + else + return new_table + end + end + end + + def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) + # transiently use a locked forwarding node + locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) + if old_table.cas(i, nil, locked_forwarder) + new_table.volatile_set(i, nil) # kill the potential reverse forwarders + new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders + old_table.volatile_set(i, forwarder) + locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) + true + end + end + + # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. 
+ def split_old_bin(table, new_table, i, node, node_hash, forwarder) + table.try_lock_via_hash(i, node, node_hash) do + split_bin(new_table, i, node, node_hash) + table.volatile_set(i, forwarder) + end + end + + def split_bin(new_table, i, node, node_hash) + bit = new_table.size >> 1 # bit to split on + run_bit = node_hash & bit + last_run = nil + low = nil + high = nil + current_node = node + # this optimises for the lowest amount of volatile writes and objects created + while current_node = current_node.next + unless (b = current_node.hash & bit) == run_bit + run_bit = b + last_run = current_node + end + end + if run_bit == 0 + low = last_run + else + high = last_run + end + current_node = node + until current_node == last_run + pure_hash = current_node.pure_hash + if (pure_hash & bit) == 0 + low = Node.new(pure_hash, current_node.key, current_node.value, low) + else + high = Node.new(pure_hash, current_node.key, current_node.value, high) + end + current_node = current_node.next + end + new_table.volatile_set(i, low) + new_table.volatile_set(i + bit, high) + end + + def increment_size + @counter.increment + end + + def decrement_size(by = 1) + @counter.add(-by) + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/collection/map/mri_map_backend.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/mri_map_backend.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/collection/map/mri_map_backend.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/mri_map_backend.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/collection/map/non_concurrent_map_backend.rb 
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/non_concurrent_map_backend.rb similarity index 97% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/collection/map/non_concurrent_map_backend.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/non_concurrent_map_backend.rb index ba86d7c0fb..e7c62e6d19 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/collection/map/non_concurrent_map_backend.rb +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/non_concurrent_map_backend.rb @@ -10,7 +10,7 @@ module Concurrent # WARNING: all public methods of the class must operate on the @backend # directly without calling each other. This is important because of the - # SynchronizedMapBackend which uses a non-reentrant mutex for perfomance + # SynchronizedMapBackend which uses a non-reentrant mutex for performance # reasons. def initialize(options = nil) @backend = {} @@ -95,7 +95,6 @@ module Concurrent end def each_pair - return enum_for :each_pair unless block_given? 
dupped_backend.each_pair do |k, v| yield k, v end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/synchronized_map_backend.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/synchronized_map_backend.rb new file mode 100644 index 0000000000..190c8d98d9 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/map/synchronized_map_backend.rb @@ -0,0 +1,82 @@ +require 'concurrent/collection/map/non_concurrent_map_backend' + +module Concurrent + + # @!visibility private + module Collection + + # @!visibility private + class SynchronizedMapBackend < NonConcurrentMapBackend + + require 'mutex_m' + include Mutex_m + # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are + # not allowed to call each other. + + def [](key) + synchronize { super } + end + + def []=(key, value) + synchronize { super } + end + + def compute_if_absent(key) + synchronize { super } + end + + def compute_if_present(key) + synchronize { super } + end + + def compute(key) + synchronize { super } + end + + def merge_pair(key, value) + synchronize { super } + end + + def replace_pair(key, old_value, new_value) + synchronize { super } + end + + def replace_if_exists(key, new_value) + synchronize { super } + end + + def get_and_set(key, value) + synchronize { super } + end + + def key?(key) + synchronize { super } + end + + def delete(key) + synchronize { super } + end + + def delete_pair(key, value) + synchronize { super } + end + + def clear + synchronize { super } + end + + def size + synchronize { super } + end + + def get_or_default(key, default_value) + synchronize { super } + end + + private + def dupped_backend + synchronize { super } + end + end + end +end diff --git 
a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/non_concurrent_priority_queue.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/non_concurrent_priority_queue.rb new file mode 100644 index 0000000000..695ffdf2b2 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/non_concurrent_priority_queue.rb @@ -0,0 +1,143 @@ +require 'concurrent/collection/java_non_concurrent_priority_queue' +require 'concurrent/collection/ruby_non_concurrent_priority_queue' +require 'concurrent/utility/engine' + +module Concurrent + module Collection + + # @!visibility private + # @!macro internal_implementation_note + NonConcurrentPriorityQueueImplementation = case + when Concurrent.on_jruby? + JavaNonConcurrentPriorityQueue + else + RubyNonConcurrentPriorityQueue + end + private_constant :NonConcurrentPriorityQueueImplementation + + # @!macro priority_queue + # + # A queue collection in which the elements are sorted based on their + # comparison (spaceship) operator `<=>`. Items are added to the queue + # at a position relative to their priority. On removal the element + # with the "highest" priority is removed. By default the sort order is + # from highest to lowest, but a lowest-to-highest sort order can be + # set on construction. + # + # The API is based on the `Queue` class from the Ruby standard library. + # + # The pure Ruby implementation, `RubyNonConcurrentPriorityQueue` uses a heap algorithm + # stored in an array. The algorithm is based on the work of Robert Sedgewick + # and Kevin Wayne. + # + # The JRuby native implementation is a thin wrapper around the standard + # library `java.util.NonConcurrentPriorityQueue`. + # + # When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`. 
+ # When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`. + # + # @note This implementation is *not* thread safe. + # + # @see http://en.wikipedia.org/wiki/Priority_queue + # @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html + # + # @see http://algs4.cs.princeton.edu/24pq/index.php#2.6 + # @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html + # + # @!visibility private + class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation + + alias_method :has_priority?, :include? + + alias_method :size, :length + + alias_method :deq, :pop + alias_method :shift, :pop + + alias_method :<<, :push + alias_method :enq, :push + + # @!method initialize(opts = {}) + # @!macro priority_queue_method_initialize + # + # Create a new priority queue with no items. + # + # @param [Hash] opts the options for creating the queue + # @option opts [Symbol] :order (:max) dictates the order in which items are + # stored: from highest to lowest when `:max` or `:high`; from lowest to + # highest when `:min` or `:low` + + # @!method clear + # @!macro priority_queue_method_clear + # + # Removes all of the elements from this priority queue. + + # @!method delete(item) + # @!macro priority_queue_method_delete + # + # Deletes all items from `self` that are equal to `item`. + # + # @param [Object] item the item to be removed from the queue + # @return [Object] true if the item is found else false + + # @!method empty? + # @!macro priority_queue_method_empty + # + # Returns `true` if `self` contains no elements. + # + # @return [Boolean] true if there are no items in the queue else false + + # @!method include?(item) + # @!macro priority_queue_method_include + # + # Returns `true` if the given item is present in `self` (that is, if any + # element == `item`), otherwise returns false. 
+ # + # @param [Object] item the item to search for + # + # @return [Boolean] true if the item is found else false + + # @!method length + # @!macro priority_queue_method_length + # + # The current length of the queue. + # + # @return [Fixnum] the number of items in the queue + + # @!method peek + # @!macro priority_queue_method_peek + # + # Retrieves, but does not remove, the head of this queue, or returns `nil` + # if this queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method pop + # @!macro priority_queue_method_pop + # + # Retrieves and removes the head of this queue, or returns `nil` if this + # queue is empty. + # + # @return [Object] the head of the queue or `nil` when empty + + # @!method push(item) + # @!macro priority_queue_method_push + # + # Inserts the specified element into this priority queue. + # + # @param [Object] item the item to insert onto the queue + + # @!method self.from_list(list, opts = {}) + # @!macro priority_queue_method_from_list + # + # Create a new priority queue from the given list. 
+ # + # @param [Enumerable] list the list to build the queue from + # @param [Hash] opts the options for creating the queue + # + # @return [NonConcurrentPriorityQueue] the newly created and populated queue + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb new file mode 100644 index 0000000000..bdf3cba359 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb @@ -0,0 +1,150 @@ +module Concurrent + module Collection + + # @!macro priority_queue + # + # @!visibility private + # @!macro internal_implementation_note + class RubyNonConcurrentPriorityQueue + + # @!macro priority_queue_method_initialize + def initialize(opts = {}) + order = opts.fetch(:order, :max) + @comparator = [:min, :low].include?(order) ? -1 : 1 + clear + end + + # @!macro priority_queue_method_clear + def clear + @queue = [nil] + @length = 0 + true + end + + # @!macro priority_queue_method_delete + def delete(item) + return false if empty? + original_length = @length + k = 1 + while k <= @length + if @queue[k] == item + swap(k, @length) + @length -= 1 + sink(k) + @queue.pop + else + k += 1 + end + end + @length != original_length + end + + # @!macro priority_queue_method_empty + def empty? + size == 0 + end + + # @!macro priority_queue_method_include + def include?(item) + @queue.include?(item) + end + alias_method :has_priority?, :include? + + # @!macro priority_queue_method_length + def length + @length + end + alias_method :size, :length + + # @!macro priority_queue_method_peek + def peek + empty? ? nil : @queue[1] + end + + # @!macro priority_queue_method_pop + def pop + return nil if empty? 
+ max = @queue[1] + swap(1, @length) + @length -= 1 + sink(1) + @queue.pop + max + end + alias_method :deq, :pop + alias_method :shift, :pop + + # @!macro priority_queue_method_push + def push(item) + raise ArgumentError.new('cannot enqueue nil') if item.nil? + @length += 1 + @queue << item + swim(@length) + true + end + alias_method :<<, :push + alias_method :enq, :push + + # @!macro priority_queue_method_from_list + def self.from_list(list, opts = {}) + queue = new(opts) + list.each{|item| queue << item } + queue + end + + private + + # Exchange the values at the given indexes within the internal array. + # + # @param [Integer] x the first index to swap + # @param [Integer] y the second index to swap + # + # @!visibility private + def swap(x, y) + temp = @queue[x] + @queue[x] = @queue[y] + @queue[y] = temp + end + + # Are the items at the given indexes ordered based on the priority + # order specified at construction? + # + # @param [Integer] x the first index from which to retrieve a comparable value + # @param [Integer] y the second index from which to retrieve a comparable value + # + # @return [Boolean] true if the two elements are in the correct priority order + # else false + # + # @!visibility private + def ordered?(x, y) + (@queue[x] <=> @queue[y]) == @comparator + end + + # Percolate down to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def sink(k) + while (j = (2 * k)) <= @length do + j += 1 if j < @length && ! ordered?(j, j+1) + break if ordered?(k, j) + swap(k, j) + k = j + end + end + + # Percolate up to maintain heap invariant. + # + # @param [Integer] k the index at which to start the percolation + # + # @!visibility private + def swim(k) + while k > 1 && ! 
ordered?(k/2, k) do + swap(k, k/2) + k = k/2 + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/deprecation.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/deprecation.rb new file mode 100644 index 0000000000..35ae4b2c9d --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/deprecation.rb @@ -0,0 +1,34 @@ +require 'concurrent/concern/logging' + +module Concurrent + module Concern + + # @!visibility private + # @!macro internal_implementation_note + module Deprecation + # TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed. + include Concern::Logging + + def deprecated(message, strip = 2) + caller_line = caller(strip).first if strip > 0 + klass = if Module === self + self + else + self.class + end + message = if strip > 0 + format("[DEPRECATED] %s\ncalled on: %s", message, caller_line) + else + format('[DEPRECATED] %s', message) + end + log WARN, klass.to_s, message + end + + def deprecated_method(old_name, new_name) + deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3 + end + + extend self + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/dereferenceable.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/dereferenceable.rb new file mode 100644 index 0000000000..b0d1a2ef85 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/dereferenceable.rb @@ -0,0 +1,73 @@ +module Concurrent + module Concern + + # Object references in Ruby are mutable. 
This can lead to serious problems when + # the `#value` of a concurrent object is a mutable reference. Which is always the + # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type. + # Most classes in this library that expose a `#value` getter method do so using the + # `Dereferenceable` mixin module. + # + # @!macro copy_options + module Dereferenceable + # NOTE: This module is going away in 2.0. In the mean time we need it to + # play nicely with the synchronization layer. This means that the + # including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Return the value this object represents after applying the options specified + # by the `#set_deref_options` method. + # + # @return [Object] the current value of the object + def value + synchronize { apply_deref_options(@value) } + end + alias_method :deref, :value + + protected + + # Set the internal value of this object + # + # @param [Object] value the new value + def value=(value) + synchronize{ @value = value } + end + + # @!macro dereferenceable_set_deref_options + # Set the options which define the operations #value performs before + # returning data to the caller (dereferencing). + # + # @note Most classes that include this module will call `#set_deref_options` + # from within the constructor, thus allowing these options to be set at + # object creation. + # + # @param [Hash] opts the options defining dereference behavior. 
+ # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def set_deref_options(opts = {}) + synchronize{ ns_set_deref_options(opts) } + end + + # @!macro dereferenceable_set_deref_options + # @!visibility private + def ns_set_deref_options(opts) + @dup_on_deref = opts[:dup_on_deref] || opts[:dup] + @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze] + @copy_on_deref = opts[:copy_on_deref] || opts[:copy] + @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref) + nil + end + + # @!visibility private + def apply_deref_options(value) + return nil if value.nil? + return value if @do_nothing_on_deref + value = @copy_on_deref.call(value) if @copy_on_deref + value = value.dup if @dup_on_deref + value = value.freeze if @freeze_on_deref + value + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/logging.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/logging.rb new file mode 100644 index 0000000000..2c749996f9 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/logging.rb @@ -0,0 +1,32 @@ +require 'logger' + +module Concurrent + module Concern + + # Include where logging is needed + # + # @!visibility private + module Logging + include Logger::Severity + + # Logs through {Concurrent.global_logger}, it can be overridden by setting @logger + # @param [Integer] level one of Logger::Severity constants + # @param [String] progname e.g. 
a path of an Actor + # @param [String, nil] message when nil block is used to generate the message + # @yieldreturn [String] a message + def log(level, progname, message = nil, &block) + #NOTE: Cannot require 'concurrent/configuration' above due to circular references. + # Assume that the gem has been initialized if we've gotten this far. + logger = if defined?(@logger) && @logger + @logger + else + Concurrent.global_logger + end + logger.call level, progname, message, &block + rescue => error + $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" + + "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}" + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/obligation.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/obligation.rb new file mode 100644 index 0000000000..2c9ac12003 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/obligation.rb @@ -0,0 +1,220 @@ +require 'thread' +require 'timeout' + +require 'concurrent/atomic/event' +require 'concurrent/concern/dereferenceable' + +module Concurrent + module Concern + + module Obligation + include Concern::Dereferenceable + # NOTE: The Dereferenceable module is going away in 2.0. In the mean time + # we need it to place nicely with the synchronization layer. This means + # that the including class SHOULD be synchronized and it MUST implement a + # `#synchronize` method. Not doing so will lead to runtime errors. + + # Has the obligation been fulfilled? + # + # @return [Boolean] + def fulfilled? + state == :fulfilled + end + alias_method :realized?, :fulfilled? + + # Has the obligation been rejected? + # + # @return [Boolean] + def rejected? + state == :rejected + end + + # Is obligation completion still pending? 
+ # + # @return [Boolean] + def pending? + state == :pending + end + + # Is the obligation still unscheduled? + # + # @return [Boolean] + def unscheduled? + state == :unscheduled + end + + # Has the obligation completed processing? + # + # @return [Boolean] + def complete? + [:fulfilled, :rejected].include? state + end + + # Is the obligation still awaiting completion of processing? + # + # @return [Boolean] + def incomplete? + ! complete? + end + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Object] see Dereferenceable#deref + def value(timeout = nil) + wait timeout + deref + end + + # Wait until obligation is complete or the timeout has been reached. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + def wait(timeout = nil) + event.wait(timeout) if timeout != 0 && incomplete? + self + end + + # Wait until obligation is complete or the timeout is reached. Will re-raise + # any exceptions raised during processing (but will not raise an exception + # on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Obligation] self + # @raise [Exception] raises the reason when rejected + def wait!(timeout = nil) + wait(timeout).tap { raise self if rejected? } + end + alias_method :no_error!, :wait! + + # The current value of the obligation. Will be `nil` while the state is + # pending or the operation has been rejected. Will re-raise any exceptions + # raised during processing (but will not raise an exception on timeout). + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Object] see Dereferenceable#deref + # @raise [Exception] raises the reason when rejected + def value!(timeout = nil) + wait(timeout) + if rejected? + raise self + else + deref + end + end + + # The current state of the obligation. 
+ # + # @return [Symbol] the current state + def state + synchronize { @state } + end + + # If an exception was raised during processing this will return the + # exception object. Will return `nil` when the state is pending or if + # the obligation has been successfully fulfilled. + # + # @return [Exception] the exception raised during processing or `nil` + def reason + synchronize { @reason } + end + + # @example allows Obligation to be risen + # rejected_ivar = Ivar.new.fail + # raise rejected_ivar + def exception(*args) + raise 'obligation is not rejected' unless rejected? + reason.exception(*args) + end + + protected + + # @!visibility private + def get_arguments_from(opts = {}) + [*opts.fetch(:args, [])] + end + + # @!visibility private + def init_obligation + @event = Event.new + @value = @reason = nil + end + + # @!visibility private + def event + @event + end + + # @!visibility private + def set_state(success, value, reason) + if success + @value = value + @state = :fulfilled + else + @reason = reason + @state = :rejected + end + end + + # @!visibility private + def state=(value) + synchronize { ns_set_state(value) } + end + + # Atomic compare and set operation + # State is set to `next_state` only if `current state == expected_current`. + # + # @param [Symbol] next_state + # @param [Symbol] expected_current + # + # @return [Boolean] true is state is changed, false otherwise + # + # @!visibility private + def compare_and_set_state(next_state, *expected_current) + synchronize do + if expected_current.include? @state + @state = next_state + true + else + false + end + end + end + + # Executes the block within mutex if current state is included in expected_states + # + # @return block value if executed, false otherwise + # + # @!visibility private + def if_state(*expected_states) + synchronize do + raise ArgumentError.new('no block given') unless block_given? + + if expected_states.include? 
@state + yield + else + false + end + end + end + + protected + + # Am I in the current state? + # + # @param [Symbol] expected The state to check against + # @return [Boolean] true if in the expected state else false + # + # @!visibility private + def ns_check_state?(expected) + @state == expected + end + + # @!visibility private + def ns_set_state(value) + @state = value + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/observable.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/observable.rb new file mode 100644 index 0000000000..b5132714bf --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concern/observable.rb @@ -0,0 +1,110 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/collection/copy_on_write_observer_set' + +module Concurrent + module Concern + + # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one + # of the most useful design patterns. + # + # The workflow is very simple: + # - an `observer` can register itself to a `subject` via a callback + # - many `observers` can be registered to the same `subject` + # - the `subject` notifies all registered observers when its status changes + # - an `observer` can deregister itself when is no more interested to receive + # event notifications + # + # In a single threaded environment the whole pattern is very easy: the + # `subject` can use a simple data structure to manage all its subscribed + # `observer`s and every `observer` can react directly to every event without + # caring about synchronization. + # + # In a multi threaded environment things are more complex. 
The `subject` must + # synchronize the access to its data structure and to do so currently we're + # using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet} + # and {Concurrent::Concern::CopyOnNotifyObserverSet}. + # + # When implementing and `observer` there's a very important rule to remember: + # **there are no guarantees about the thread that will execute the callback** + # + # Let's take this example + # ``` + # class Observer + # def initialize + # @count = 0 + # end + # + # def update + # @count += 1 + # end + # end + # + # obs = Observer.new + # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) } + # # execute [obj1, obj2, obj3, obj4] + # ``` + # + # `obs` is wrong because the variable `@count` can be accessed by different + # threads at the same time, so it should be synchronized (using either a Mutex + # or an AtomicFixum) + module Observable + + # @!macro observable_add_observer + # + # Adds an observer to this set. If a block is passed, the observer will be + # created by this method and no other params should be passed. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. + # Default is :update + # @return [Object] the added observer + def add_observer(observer = nil, func = :update, &block) + observers.add_observer(observer, func, &block) + end + + # As `#add_observer` but can be used for chaining. + # + # @param [Object] observer the observer to add + # @param [Symbol] func the function to call on the observer during notification. + # @return [Observable] self + def with_observer(observer = nil, func = :update, &block) + add_observer(observer, func, &block) + self + end + + # @!macro observable_delete_observer + # + # Remove `observer` as an observer on this object so that it will no + # longer receive notifications. 
+ # + # @param [Object] observer the observer to remove + # @return [Object] the deleted observer + def delete_observer(observer) + observers.delete_observer(observer) + end + + # @!macro observable_delete_observers + # + # Remove all observers associated with this object. + # + # @return [Observable] self + def delete_observers + observers.delete_observers + self + end + + # @!macro observable_count_observers + # + # Return the number of observers associated with this object. + # + # @return [Integer] the observers count + def count_observers + observers.count_observers + end + + protected + + attr_accessor :observers + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concurrent_ruby.jar b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/concurrent_ruby.jar new file mode 100644 index 0000000000000000000000000000000000000000..ed56742a858339c98ce779509b11181210a139ba GIT binary patch literal 137023 zcmcG#bChKLwk4XCwr$(CtxB7fwrxe)wzJZ^;_+YtAJF84yrZAV^3^pd)cJIiUZ?2MP!jNKQ;eh)zmgoZ({}2nYm7K?VZy z&k5jv9#i3VO@t-%#3CT-|iz%zn%ZcC1O-{(l(9zGq$ z*zfOuJ;mX_yaLYeUq$R}ja(cZO>CW&T)ufIn>e~!7@5!;SsOYzg(&NOMG->fTd1*V z2~mRajZ{Ud3k?7u;Fom{>BhA0X2s?b3dmx0lIW>U2CTmR+aK2a#)Nrt9$^P&G^y)OZ||$OV*oWkC9(Ymr35fm7@Ch zL?T^lE61}q>16^>KPh>w@EBVe%!K8MiwBG#a0VefErT}|)qI__L%&%@!IIEmN;(w; zUqn4a)$C$dvp5IC^TwO{NJdO(9(IK}=MCn(Q(EYccHhBq+(YN&4b*zXmrEqiB{Pr#*6IqLIj)smNq)h*Cu4JVRX%HqvzB3!Es;VdL zTOp(}44ua&YDvY?MQBxM{xMr@t8w-KR+?A!S5nI7FTP2f8xfL^ec;TgiDOw~vfBcB z+dvojaYD!_@Z>CEhQoc8L0k~`uw_1FMSR9#7%Moi;q|uA0B==lMHrrL#?%}F#Dt%L zf=)9Dnlx%n7Hu{tuL!o;q87aj5PB`zTAu+K3Yu$3)db_c(no3qwN5>Bt#nMr^v>q3 zobgDCd!y$uhb|5zQ3^UL#df^+wOc>0c&RTmZUx+R!AId|I+9IM>T zPjkAiZX>*Yd}P}_vQqEin2f>!uSF>JKWr1XBaIOcOvjOSz#b^>+3uF5Z1^z_++7bp z#KyexEdva_ILQI~0^jF*;gKJ(%pV?DzBQKl97e}AiDtyh+wunS#S1kMrJn~_kxIn> zg4WF^b(l|AAFO6?$wBhN4P%&p(TF7uI3;!EdW&Rhd4plX+Yh6bmL?Be9R6modKo?W0Sb`-7eyz_O&Zw6ooQn8J 
z=7)mz^(P>kZYEapnH4;CWKL&EAKQ4lr}5{jERG+n4D!ZiqUF?Zu01qJH{2Jbwy1!; z6|S3#EBz5;Fv?zxTvdAwH`QpJ`8n&;6ga06Q8RVNe>NrHt`wN!Z}<4ay0f)o*1@(^uyHNkhiT-Xnf zhVUR*eidHJ(SBe+1_D6^pkDDwwHgu;dAeANQ{U8ldh*sDQH0VzH9RtwyimPBl+`C><=|9Z{kL(D10Pk}0N0S6>V_G`M ze>@D$#@;*V^!jJPkAIJxXD>3~l%EJ|j2d=;x*j`A^Tv)P0YJU5Vhx~ftJ{-B40D#; z25kPy4~#{f9IV4xK{W?vlE_`c+ZsOymH=3r(eqs&va~G7Z{J0M@nw(Ul$dFp1u$tz zu-tnt|3D3sjfr}Cc!FAlZS$b%GH6|UC_1ui;KpdF>A=m-c5(SC2zw`)YKXN>6=3Wr zf_c`|V7zT*S-p{-feiV$&b7bJZ>;eu=}T0|gQ7KDL`Ls}m0_{DXUwPx{xs+!vEta( ze00{*IlY*|A`^v{@*Q@?pcH?io^Oi9dID{=p)YbcGM7 zDC>|TdGj?qu9z;a@kVZH(y8)+;Z%w1^b~DTERvdEAZDE z*^&~C5Y*e&Y`1#P9wM}@#e+2%aaZdqXFya`Ay>=BNmFL!d`bN{4QfO$yC1t6?P1uU zA(B>6Tw276z2Ff3g=!tb8``1Dskstk#W(mBRn`Kj5Te{;2a!BqCn4~*Qg@sYl{5`P z25Q<>Y9({Ex-c>{cpWtS6x2giTBDjkZQ!j#((I-x>+N`dQ}5Ky9p@KogcR}uc#V{+ zXOHvhzzyoFT_ZHN+i=0`B61tcWu5kGH8uStG3r^3M)2g$Zc5G85wG7Uy}oh`>9x^-c^aR5Q=u)v5Gk^!cqeN~)#i6V;W;41?IjW9v0u*%ru}MeklVh+ma*Iw-rD zYmG%ie93Xw8o2e9RKdC#R!!yIO3S8{nW$zz9N|4u#e%yzrFiDD7aGVdrXO+I(^1Sf z(necPFqd(86z2w4ipCD-KU|p=jNZmx7n(Ruat@skik_8hR$FD4)+Xw06v;Sy5duMb zsHq*q)i`pojeot7XakL423y0t`(!t8v^FFZ&Tr1YqV5wBfJF=9qP_ElLcW@Fg^6TM zfLH|oN^X}ZHTMl3PoaRUBBl*ZP8a}L1qk6DD+m81`8XFM1(B$7CMCIs#BJgqnV>9x zR&7{=+4#C^!eAC?)cOKNJXDCZ!+AzfaT^QOC|lSsHg009hrAAgP)3-4-W05s-u zJh)>enj^&KSZWr^FE4|-k9CgLsY(8-h+*g8_$_HmNu}dF0slRgQqPDW8c#!U4ud=>kwHVEWpv%pptTAPq-xEGsd2HGPp;AR!GiT zTPoO3q>>~d46*O0ixm(JQVMRC%6alzNHX5w(>-^$D>MA2aa@OjLoB91VV=Nu`<~q+ zKdJGaehY885MPmapC3j$(X%X~qf0D{RPG}ums>p=2#U^mEWJKK<%#>8?~rV6(AjBg} zg_^)ta&CdYUX^dbxeSrFad_btpkpm z_vTQ2q!<6yK|t?-lV+O+(rVO#=x|w#)4<7wE!Do+Sc8xHM#u-M&p29^;JZ!LSWr^^a8}X5_DErLJ_7$SH z4ou(~OCd!SF6c+W9M(cUs3i)bfM`E261TBE2HhyCu!2ZjzU?Z542s=qKD|%i+wjzP z5-v|!(CxYlJ`#TVY)E{=W>l3}rY6^@v&WgVmzcHK@4}%hHvvaZ#oA~bTnu;PeQcWs zW~ukc)c=t5NG{8ugTFl+bOt>J~Lp z3%9v{xe#lQSScC|CKMGJ8jL{5FfUoK(A~UBSgQ*7H1xiC#b6|t^k(FLD~vL8!-<3& zes-LmOuyGPoqm6QdBpJ-+gxOd@`g_2E@8~>myMe_<&1O^;;b=L8_0qOxwnAEU`1uq zF*eAAEo7_tk%l*ca$It7n^MV4no2uzXU8-MGb8~Uj@CkjC2pBQ!qJ`$P(kn*D2cG` 
z=)}s$bPU-?QWV;1SYo6|)UaLoSoY z#>quu)Mm#K4}`*gzSkkjytPjO&2|K}nxsAA{dq>ErOs*`Z-;mN#9Gv6q$yL02@_){?RDO?()P8pGs-tqaZK4*Z5UtO~E1C%l^~k0#KwYjSbiX!~ zR&~aXGsrH<2l@r_q?@hhIR;z{FyAJSqURGo>JjJfEA3yy`ww>A2Bi`WKWb&2iv^55 z$pGV@UDR+3c{6Z~Q4opws8HGBGFS*ibl~?i>xuw(ekkGk4djKK7Mfy%$ zMS(pW*MGLtY$}W{$m9QkH3A;pMSkuu&P2?hyj*6P=Z{S8`{hr7TU(S{;Qg&-m;csP zS^wLv`pJk_!BBhvY{kFx__G>rqCBBCXLQg!l1}#N;Wdu?*l$aDv zV9WzR6(dN(x@IG3OEIOiyPi-~|x58OZ_uE30S9V0x?PFA%VX28&FiC*Jazt?f63vS0-+S*pp2!7zVR)6)k)UX;U~x7%6__rWqagP4iXbwwqF`m z0YEl2?$G@UH2>~2Z#A=I_B!rBm)JIMRkiqD>@PVJ9U-;80)^Tl1FWPK&}t_E@k6w4 zXTKJ06NuiYyM)ho$xQVKZ=oBqvb_Svgvu?An*}|h8*s8>=UieOp&YWz6tH9&ds#uU z23U{8s?A7FNk47l-~Y;0FZFwRJb&Yw3*zs@%iq-?{!%G&hW5gSMph=a#{aL%lC})+ zZ|)hEu`=c8px|=`sj{z97ontLT+Aarr7~UV8(vPwbR+px<%0tI*>s#Gn3Xt(sY~#T>BE#2|?p`d&Z6ll|=q!V>ssqPaF((6d{`l}wh$ zO{@Pg5_U`m4^usBJdV-{ec1ASFRkNAn5}%CY18?h?ql@KkYfDNS)$xj?%A!~(2xua z1`3c+vC&+SjDty%0dA*4eW>!bJ#d2K-nZ zf2))uaM|@D95pgowztTksas9x>Ak`S`w+z&s&X1R-T>^FN4rHo@!^zWF=l__^Iw8= zzn4bJ{{OfBApSS;`rp{}UoQVg^{`mg=Ieq2s_zwe1&$bCA7&Rv%RVWU8I?62(Ify8 z%QE;mdDAuZGRCpvA*2<-mwb=lnSu3-?7PAHMJ#g;yzoAzpVh4I}@{{5oZt*?^4`^f~iVSnW5|esEB812jJ3k8ePP1?tVgTDfXiM zNP;?PqhBk`uIt-r#1GuBA^T3FjEe=1UHdS4)VRrr#As^92C^2ln4W6tT)zdSv&fD^-z)^=GuDp}FKSF(;DWo$7e{*pTOjm-V z`p4-&BfyM1#|U89N=L+fRgjD8$@Ue?17Ph`$TrD5sZ+j&t;aa2FNY$+fV5bkU_Cko zXA3ddSGzKv{n_z;7sT%wz5e9FjM1a$Hcj~Lv|45_W!bVnK6BnsgG=B04Pi##of$R} z0|yF!?uir5kvn0{+FHe**rN$)tm{GQ>-<`%wu(z;^>DnPL>AKseDCBf29f3tB}NrhL@#~S;kHqc&?u098u?D=46(4g{AI?gxhMMvR992CrmJfnZoPWeE;2L3zq| zyg_hpkVg0buF(Rkdoa+{tGwgT{5BJaK!_?NYf|#vv*+ri{kxZI&#&iaC_#aeA9Mb} zS?xzjD)~gk1rY5KdJkowZL|KKIx38L$Oa01WMHXiIwqxl)*5qJs;n4TF=i3+Li+aa zoAp=E%@irE{L}=xM&@yx4d!0&Wh1kiUp)M__r&?T;E5u+?etpb^uR=5r<9){xowif z;hf?(Jiu+{o7)nbVONgm2PB7S>CX;#9`Hgpwyk$FaL=@9S%^Okx*bVq0lJVeCfy|f+_PSq+FCA$c40zd{Cu(6{8GtufZ!f8-3o0pB z0Vh1nql!#d8?gG+?i*(s=sMr+?Bjl%?aYwFy5*RQgP4hYa_Fk|1E?6;v>)&iV_`hJ zDX7)I#zrN^culfb70_JtvpyY1dcbio7VII$Segqr03hKl&4=Wqn*oYNXq)ql0Eh7& 
zz((3EV>lI{x_s4wGazx1{f*|qTtLM52NPB6fc+Ukth5lo(A}>RQ_JmC0I<-(N;p8Y z-g)Tdtu{H8f_sJh1#ptgZR>RM;dNW754Xf&NJ(x7)zsVN^@Ecb?`=b;0BWUCQ-lLI zeG)Q~2!BIlf&s?%sDaEjg3FMZ_tmbfG)JV$9%LK!MFh~zw}LJ*vn>sc_mHt5I7fmS zKKcP7sRRR_kddI5MTA|&DPDTeC|v+K_4l;K5Cu$e48+fRwP}@r}qnBMFG=7$!;u<+`cRURIdQ9 z#y&!A1yPTrhY0KvT7H3m4VoL(xms%4uQA_+j;@D9_#QHPRBqqJG)NueHi3zqyj`G> zU#bxH@v>Q=pA*OatiMe#MI#J(N&Zo86ZQpXcb7@cjql2)vP3mg-hq6CWD z0zFjDN{^EZ+-K7vIJ0?5Nh%2wz^rE2NBEg?-QItT4_-YMU)RS>(olUyFV0cAH0Sy7a_^P1|gHY4hm2Y%q zqNJ0%4~{Cn%M3}K;XBZFC?v@HD!Dg=WX0fM&d&vtaAVJwk^}~EYuP8lYmYF_R@tp=sZXB!D0rNNd^@+Cz3b1^8)9M|`ruGE?`NxAnQam$SDrTw z8*!aIiBwu`OE0KBB(Y%P-EJu?%@S#%1?!}`gMj7Zp{E2?XD=bi&vnQ_h{Y<97G%H^ zO8H<&o{M^F1I!zDDf46IsKcp)rSPSl>4V4;CQ7A?Qjny*1)urOu(T|yiJ&?Q(lczQ z?!Y{hQpyIA*Jz27-T`3=G^Kh%Q`*6aG`q@Wv7Eg($XW5^k1??2z^T|$yKL(>;1m`v zaC`cP9D{SibMk&F4q5x0zhoRfzQa{nTD*WDWrUiG+9KDmSaouTrAE~8&%J;(Nnz8> zZrebUMKyd$h0lUH^ca)`%tdj+FO(-|v7%InJgh0i(u6ISGDPk^;r!vy$ZK{?>fh*e z`@fA&)_;gAN z6T*kT-!29wiX348wh!Yl21?IZ>6>iV7c%s@5~rVIIZaQMK0GYB-JL!VyR4>KbAVu- z1jjSwKNL#P9`OaX9F%JTpq8b|ScGh#(Z>Zw0!`1@$j@47zD=DD1FO51puBj3C!jxv zxASw5cxB;kSq#7)&LY!1Lf{2Ea!zPB!?j6iY>8NJetR5@gQvhaUi;&fE^r$2sL_z# zzN0r!ZiC&8Ic2YX(*$!SqgX8aJ+u43exn1Um0aSLd!llb2cR5YtvqoO4Xy2N1pD>d zhD);r{s*S(JG=;|GPQjK7mYRl4~FA5r;SnVA`A9$lZyTuO0|)%+ZZVn);?N}ZRxou zCF<7>j|R8k4zQP8vBE`?AmTzeuvD!@{`2g}6Vo^da9hk51s_4Z;BwxD5gC*e5zTuq zF|f>ZCEl5PB6xP0$tK$_;Gv;yIWHd98GF-6Q|7zY4?86lLaUfyCCAd;Mt4+16K=J> zPY)BEWt)l37Rfq>7-p>;>eS|~Q4Vl;t2Krct`=biEdY60g?hH4ctv}$0n!r6PweXb z41k4>*{MxfHXw*o)Tp6LJT}K9tdb!y5C`_;qh1BN9@c2XXlS-=?49Z1-4LkEIC7Kf z&dWrE6L#W}+~#ud<7vA*8#j8!oRS;?scE!Jn+;b);=;`-oP2Y-37Z>e%venW_I4Fr zJ~(1mjY$2tq?JJy0P#s{6zyb=Zrhtl+%v)0dbDhGx}!C+N7)6&rtknMM(oJ@C4n_R zpeVig$ug{39+yxB2sq5y#Y8IV4$?!s&8deeeLYKkO-EJd4_@A_dIg09s?8=8A#zA?+&y9YcAVFOY$X zt7?ByariESQ*L4LhJ~axauy+rSaY!B@rcZTW#mQtf?wt1teQ`614$Nz@+DO-3-*xe zH$Lki94-oVIk?tDDu2#Hi1SR7tP}SL1@3VC!KbhVmMHCSeCGcxKL4)G@Gt4xe`M@ns@v?iaLW3jM+>0-|;+6c;#ik)K5-zc%LnNMn5G^yv2D%rXK 
ztZg;#DSSZ^6B87RfY?T>T6ZjhQ>CO9<5Ia11mor8z3Bx(t^55ZENgP?I1N(Dmo+}u zQ#>C#x7|N%2s{qD-@ecyJ$j7xosfEO2hifC+<-w?Z~DzO+w$$^4Kdt~BSGmO$JR(b zmxD0JaGPIh&|f+3+HR8U>3v+e7_h|LH={(YpY-~0p7pNWxa4#Q5ZvDg(*JbwqsBnE z8|QdF$ZUH?e?s80$40n6-Qazd=g03WM8-e(aIo#3;G`h)o(XXD&Na)4yD0)mJ|00J zeV_f(@^VW8CE#ih{&I`TAz&CNa5OjL{=mvHb92(L`JR=jZ+|ai>pL{%^x}@saCjj0 zo{_mD`K-&|`OL@RXTKZ!jB@Rn9`G?)*u&`Omwdw!&@Z=fV8f@KbfraB`xOoX%vhhZ z@_aiEIl5dizyjy|oR0@P<5#!1e#6Yd?vrP;M`!o3+Lt{P${X?ixisXMCd&NQ3^_Md zQpUPZ*#Wf~aKEmRjZPOHn;HztTTkp?JQ?TjJlL7Mk1oE~!*q?cGK3v6XXqO1h+)oc zAo2Eu)jupbk5|IET09w#w{ey*81$>#yTY5Ir5h8UtIbq+QaKfDkADK^9MZ0WiPo}?ohI}FGBq_gwlt^}6 zYVEc*d!dh@M3n_Cu~Mu-jxFz7&mm8CpZmt=0s&&_CL0+%CYJ^0mHaD$rzn_bb1K{O z5NJo~dt%ra?)$ zr6a5eDuwnf9oxAAY=={*P!gsP6GHu5CLpnVuof)bMGZ4jNeKp=^jO%n(JWmr+HaeK zst1x@5A4)?Xc5=Iv7Z8MoDk>?V(q^os+*Ntp=flCEj2ryAP>*aO&sLZgK(!qnrO<0 zLE(!MKWW`ZH{t8ut~CcPj1_?{Vn*JzI`l0f6C6P!q!L++;j=mnH|eRTn%5ObYwb`w zQ(+BiwbkX|$*dB*yJVJlRQy^B&VWaz$qr5=`8Cx$M5OcdgK-Tcy)2YgH^~T zABDanKVB<)y{i~j&Lp+%?UHs%Zws6M7^N&jS!G8b{j8qb_l-^%Durk=j|z114aL5v z*~{Cx8O+MVR$BACn+hm?i*Y*53re;`nDwVj;bFm``Kk(>js87=3G4M#R=I!YthzuP zS(mMybe!^Wz0bZg@;RrsJofY>Z@qd@r07?_vFA?+;w`=dEPKEVELE{*&~gcl=gel3 z#r{fNZej;|0kW8ZDRw(uxQjgSM)R8%@01JbvlTE=gNfq8?1YojBdoH>*)kvDE5R0dDo4^IxT9c{}&%=JnFG4b853C#MZ?S0 zGo=ouBR!qxv@fybXX0E8d_etSf~p5uxj6s(SFD-?ec*=9cy~(9%!s-*sf_|d*_D1p zI>cmq@!_6Za4K#n&La5aTk;U2z@@Yf8IwkX@1()B^`h)cjPJXddf1^@>&p}ViV)s#yN;>SrP<9EajMUgor4`E#DO}jNiDif}Ky7a+ohzLeLb66a zD*v)Q(7hzoS4Qa)LrfGOONA{EQGx*a&9X~2Kw2({ym4lRopoMRl6tGKy)Be%>_&CYp-UfWk2J)d5Lrlz>;!Txr8TS@x3O<>(#5t^_viO{+WCim(vo^^> zGUa!MIZli=X8KKU<8`R>M^t2^W0&uXFr5lEijd7A;v$AYz59w3BGL84%)pcl|xt0#>bTF<0hlPYHGa1O`1<$i(HG`mvY7*Zk2$?8~T|FuN< zVMQ8udc$IMQN_Rq_Ioi3DC)e6rVODi0scj}#)pT?;ljCRIr5+mT>bTgM{cHwbJ=V5tww~W2D6|xE6AF*<9V0A)?$~WXBNdta8~=p zIXHF67WEk^{W8=05Q9uud;#BgS)=sl&hX{nz%tvH{FkTqZkj(I5#S4-r$)vCQkBzQ zkPk8+;oc-^u2Ov~y7ND?o2_N>8EU^peFWae2TcK4rvb#RLweai=AiBJuP@D|y=5qm zFyfd>u(OXD*oUjqzCZ}jntMY{xkFJ!zSegT!sqXj)@w~1<4XLTORU8rc~d;t#J8mI 
z_ap3xP{4$uK0fy=@_3P!s~=PRq+&f(^+ z&-gE@Gm?zNw=B3CH~av-L+H!#8GIx>6tQoW7vhh4PmI&n9Z|gTcOXP~C$J;WH4z2A zA;f56JrUzSR|xc!?H3O^`L@a~1(_p+-vovsrC@>6RZNTj4muD=zhXga|M;!n0@YDn zX*IH7Bm5(XlHAu?!o1%2evcz}nm<8d4t2F`OIX@wP_{j1&6!EQ9i5zJZXMMV5s9-a zm-d^Jq``9eVRG{r;wyS17lwZHQQ>cKWht$&^$UOzi$Z=zNQ%f@H}q)~QYHFFyMu-3WS=~YmCG=d zl%z*PPjq-94NXNQHC5(9jFU&h61WPRi`taPsR%|u?Z*X%Vi~| zSo+;!E{-{^@J6MKES4oknt9=QVFt$5Gs0@_u@AOZ$zw|;T^Ol6l&4gbYj3pT+Ma+* zWctLs1^rk~5ocb_ns}^qH7qhiBPbV}qHXbCvMYRPvP#oa{Zx}yHr|+EDeLe9YRSd} zjW@r*sjxK77=ywpzxx4*DX=3Zg}pgMlU(s`_6X%35#7kSQcX}$(saP)GIOJ|lgl`q zz=ZVE!Ed0!I}=K(rGadTIFJ3*1a2<5ci1{s@tbnXTOGxsTN_^gz^CE1bZ^*}( zP-ZyYMon^?OHQZ6siZX4QXx>QZP0FRyki`aG-{+?BD-n;He6oxgu0eD>byRC0^r=} zMXG>2uKk*pC{n|jZYg$*yHqPFKl(DR_8s8RTO@dmvn^K6;us$~DkdL$N_uMn(YI`L8cU+E=Eo+SB79!zo0p!=m+R)tzKSy3bO{Y1btf3p42R|=iBFS+4L zoSXgXMxgCH7Ssqb1|JUEKE+F^+FckuLo!?*`@Fowrb>SNj~IuCzy9iJ#&p&exBec+ zDkb~7I^*BXEBs4J^v_J`ADL0Iir=4Z7as!mdNObWA*XKyp*0~*UIDY{$RH>l?H5wT zPeR?dtf4#U_UYacrNnsieTW+oEIS?pPBUQL<+trizLQP2Ya72`A0H^a#LFtAjMcj_ z5m2(!trg#(7oZ>Pb0Z2N&SqH(zo9=FPlm9Q3@FTnIAGl~qZ`vQKXk5Fr^vHU6|yb7 z4u(wS*+BOF>aVEttpiYT;-?p{SJ|Nn6KQSEeRdS#vqL=GXyw16| zitwa8#{@hl*W^^gAsD5Sx$*nF7p&5D0~ZuJZeL4zDB0C@23aa^!jJT#jnwu*Z1Q9u zjQ+((qeud<7z%3UD;7$c6tCfmD|gkMeTM8flhQLnu5q2Hrq#5i1##g(U{(c;cv~9=(_hIqdf@9b%KaP;& zfn$QwkjxxixF7t9)U1uJz&l7s*6n49j~dwF-DknF+Dln;EQ zcOb)++iD_j%USeGh}tO$3ENECLGNhNaJNSq%;;5%#JSB);hUz1@*Qi%`i^!E$UO=zFm>3j0el2K(sUh*!HX7AAn|;*@pn#4DG;029JAF%?rA9)MHrmwcG||{(lKHntC>(E*;c%auROAkSsLVJ z9^<;Lopx$cnlY6ouBwL^l~7`u<}5hqYWgzSHJ!uBTIqaAk2-t>YYspQY|^7ga)#Ve zbt!6nANFWRR3 zTXHJzzn4E^FNeXiPKDkjZ8mhH1%DW;4AC;;THRBqL1rfJ2kkcD_MMM^Fse)=`TV3~ zh%*t7vS_sut;Sy&caV$3+k-c>YsafGse+VBZa_Bkk7Ap9Xnnd!FlHDyTeS$mHKu%L z$l$-HN)@FbnzYyj9ku`!*#QS(NlRAT!K5^h6`AVgW-mHQ3D{^g%8QY+YzhFs!d^{? 
zdAg|r*p$27)QiR`D{3#y*;aDL9F^77h9!jb<-r+R<%!cZZMy2$AtZqy&GMF;DKlMS z5)8t7^~VvgqFhGs+{x#9fxKWP#MEMygdRK9Q#w3Ks3Gb~1L^{PZ31yrSz&IR_B>sP5(~QV38&qh>D(gE|Nh1vxEsJx`uF})X>?1w>p%=VAm9OlT~U+?})5O zMJhx+nL=3?lSxh^Hh?-_g&$km*9%vL54>EM@F7hMH(UJzY?#`Q=n0qG<|VhZ8KVrN zOYK+{r`fzmB5Om9PL3%4dnQ0;d_u)T9&y=ZzMof}@dTx*J7w2+a*nn;@Eyk~D5n1N zFM7yy<&<0aT|2(`+uHHpPI>)ny!aPQWb=E%7}2-6vFj%%sMeuaQAA!dA?~6Z5xI>r zvShxLhtv&B+jiCCsLP6V4Y+SgMv^FspTEF3r`u400`l6#<;4{L{p7{u#jp3rO9+2n zy&ssuT+9=wY;k|RdOY=_RLqvjg3wWWJd1E(pEW7N;}XX|2A ze6~>FM#_-Y(m6|(x99_hYBoLY8Z>rER!7-NFQp1)Dl#EmmeGdQ#Hs_lYL~BNW$XQJ znVXf%v2B+aykFN-7?!X_ytmM*KT`||GkFZcQi=w!05sLRJeQ`~6?_#}@#N=H6NZ=* zA46J&7MHpPC8=MnKjz$u%`G&(Nmg!bbJcXyY>Xe{*!D6z9f_i~RMWb&d&}a0&tsBA zCZM`%Q_*|FO@PZ*VrqzO_fjfh=U}@7PGH5f+2uMQ_@G7hDY`wfV_#@+u=OzA!_{G5 za1yJSD>oOz3`se^pJ^62=)nLcmzcrY*uB`#W`~oHO`B3p0otF-0o((7*JYL)^A-?g zdZTLe-j}cAXwglZ#rrOGi(q5mPoIM<@m;Vm9nlI#zbemUbw`LS^*MEzG<>q2$bQA* zchP4!6R-`qbEE|#&bN49Sv!3OIcpi7=mjqxjp7-kTDlvB=r&)KX?Z0= zV=z#OrWBHY?CQ6~*#e!fTtsrP|8aWS;TZ{ zKpqKtjQ7{VjdJisEWqj)R!QOx{{Eq@JOtq+<0 zDaEY(8EYO={ZlDX(~HB9PHlX8XgR&V^)@~KcCmdekpnVT|p+$MI|_god&Vu6oo zxSw$a#d&CXa8k3yR0$mNYVp4n&h~i_U!stosG~JdbwZZl6^(OH0;!alethfJ_8G|w zE3$sk)7Y0G)Vm^O%DAjT9ubYePR;QoVQ=QY6pl4lNSEF3QN_ByRXBe;67rvc>3_EA ze+is_T>po@sZzC3#!*G}MNfbWhaqYN16c&GA|vgkek=|N1Pd01ZA2E$C*HcKHZ*LA ze47vK8T>6tBHI^} zPr!n-8I}*yps3TUgG#Vx8fAKKtzi&2gvmdQoJrTPhmNDRT00KFP4q<;!M8%H9Et0_ zq~A2m%oSG%+hzH}G}JHw$Y`lb@OSyA=6E(wKL zYy`Ao>dOt@V#*nCV68bN((#P#fzml5V)nCS>!UCj7nv~v7s<${#t(6hsejIXGf)&p z4g#)jR2N)5qk{r*=L?)6^A50qQoqd!#@kH3A#){pmhQ*Dl2TahBX%V#oh3cBW)G+# zOR6OZAld^W0iNbq&hjq~7P){vJ>iO@eydKUKBpp6P0XN_5ESxE)cZi);)557rOw@J zn3!o7(Q2aI+WVN_GCKvOFDQ~c(ZK6`2sA*RwZJOzpKBJO5O*omIs<)RQ-Rk@fT|lL zZfBVSF8r=29d31oV96ncJmO7b$BdYRe(OnJrRA0 zsLtGM@OvFH?gzvI&(v}`Gc$+1xIEHwczz~!Tej`cewo&#CzR(l)yw2X!og~c|Bs~{ zQ))FSdiNNitsjkIzciL&-9_R!PzRP$Ev@PqdW>}pg4Z1dTK6gPZ^^tH9-my<4dw5-Gi284S@~mrHp-rMqD4Dxc!7F-!S(iXyBT9;BiDABVEcS$XRlqv!vpAyWdoMOi!mFHCi zo+=~8N7gv__D_O%bg 
zP_IgS0yqetC}Cmm)|y#I=(=%Klq9_fGtC2%iHiUABezGgB4`7f)GE(+HpJ_+FojVR z&oB!+Q1+KW1St_W#~NY-US+Pzf^CQz?INJwkMI_8Yi%c91E(hM8bZAIS_kaJuOjpO z;E5yb?u!P9RNkcA>;{>+doIDd{|2|jGr>G{>m!?v8|0+*5wx zn*Gg*voL?hi){aWUi^=G*FUvglAaeXI?Nrhf|!CfqWq0+SRE@_0BS~3DrisKk1efO z9k;2U)4elFi3#TW2saW~C)FB_DWF%5EFFnvIqBKyUAaBozkq53)w#jr<@RG@I?|+w z@y@-dFw>TpLf^3QPu;_^0G;r7nsE2n=&ZcoFxTt(HNo`Rb-V`nkc5KzR#5{h3e4Ct zJI5HiRk!p7DpS~{m*(Z`oK+}OX@>gsipLqXQ_5}eEAm_|%LSipgEEuXnOc4Yo=9!n z6>FJDR<`v&?l3+a{T}7IEpX-(X}^H(Mq#zoxzuEfrFHnK1wSBp#js2rzT}plsmERS zBC(TPP-YlU)Mn~+()n5qn>59-8D~=dCTP-9@>|+D%esOLU=oEEpt|OvVsrz@K*;fN zG=?JrGDpqbzwJ`xFm*PW3^el07E+SnuTFW8p3Q zE$w`_!2uVac+eBpuHFC=0`-i{Wcd&*+xHD%Z0OLQ?*JPa3djoHTpIns^~YqI^NN@1 z?xs3cpfc{K&qJ1!9{{l+@zQv`ZRZl#UC1T|+-l59{64P)JwslG3Deflbi>AMSs}3MUiZ70@fGUBex(n{KSb%l@vX$#}|-hXa*Q8p$IB zVRB#wZmP+Sw0pYCUC=$9k&5)|MuUpVm1w74Yw*y0&5GNM5L|lHzqqTN5AF&(&O^P# zV#ZN}_-gr_twdd?73Y$)w8E25DpwyJazVlnUSbcsz|2Hj;?XVw?^_PiPMAOnVnWmt zR|h;^N6Vb>Y=KFmS=A5l{07)~oI}yl{g7ZAryjnhPhf0;RyskIRS8*W1d5(BMksLK zUo`6XY;b#Qbu?M!#goq|1mh7$>C1nH>Bi;6H=KfMGOeu(prd|+&3CC^NULCmp8I)8 ztN7j+Mq$}mNW%+N^iwxxa*^~4p^XnEL7LW?H&=`R1|HWWh=~|n&Sgropb)*`4NZ0B zLMfIZ_RICb(bq-^I$wM>45DcyLIZy_0$Xs#rg%L+R&+`r3hq=mh+wwxle8tEJD4(qf zLx8Z%;^?IWc?5(&-XK4#+iv~hDy68*`y!ki4#rbF#+#Z_+`K%#0#pZ=pt;i)rmba6jRxB!EG7`KJLjWG271;D5)Q$ z%ESksf7ZbPUA;HkC>MVOI09)sEn2l$_7Yhhg!D1cZ z?m9~1j>#Uk|9G9JM)~%_7LvtFeaYl0qu8N_mIXpijGowRrI=CZdhg3=wb%MSt68{cXb*MIiaW12`cnUOJV;j-!5fCIVJMdGX zvPIvfUlJ3V9K>{JgJYa#kA4jXOAC0z-CAk=#d2x=j?UzG9TtZ1u_R8%ebn*o>KHK| z9;bT7=;{Z`cj&liJzpBz`1yLz;jMDj2Bn>jO1CYz7?6SHlmlo|YR0S_!E%*|WYXlC z9yW2c65vAHWpZ)Nw?7h$Bv%Pv2HFEc9eJZS4G|Dl^thcH><<+o8Y-)`2pj=N zMnZj6G%hzSjV7IpU#Ml;XT(jIk&Iv`)awCg9=L19HX3VhExI`cjVbpBc{3njTJY$R74JC zC;C$rOa77F&-j4;dO#wJk!IePTY1X+z*MB735~gw&A*gAjX=RMcZ& zvr==iv719OZKb8? 
zH94l_AWk=&$SKUP%9PTy2J&H3a&nQ{^eEW63!9Sh1?39(>z`#M5EKztdlV#P$U*gB zs4iB9Bb9E__9`vliRL};j{poWv$mq$a6t&YO)Bz}m!G6r zP&2_6W`V59`J52AshfK>+7{^tq2o?NFdUJmKL;Z0c7uOUBgGarccB2Mda78l04w!& zyT8mOnaZOiKaRER&ss=79WjUj*7F0+=7vT-BL|`_C{M8^3K$2RJ^X%0>l+os_I{w| z#mxK^W=@2CxkKUJr+t=31rG7m+mGx@XS!6V2;?MYb&d6{(UWIh>#Sbvq| zN(faI%R(*I5BVbpz{>uVrl@JncX+;DBml7YZ;+pX$u%$l)gkF}n~j^T3!za*TzCLP zT4^pTA%LG+^hfBmM0pEJ34u>F-CAzdJ1bckPOWLO*H^Tk@9}kMI&RMO+2&%0sZAsT ztiR|53@gQqF-UXfuNRrkekaA1TECL_@`!?E)YxK#A)=$vf0v`bglQa>y5AwO^NYG#3HlKLktLXDWLm!F+%|>FfS@s7OOy3*bnUJHo}+- zg8wod%_l7|5?Y+v3WXw;mk2mlHc-;6WFWi1)Qq=gBnEk9@btOjo1pSErH(uB0Wk?E zqkR<0DjNLclMW=J#!=Xq1WquZSDjp1t{T-?3CZ*$WQDq%7!U8QljAtw!x*ptog+z_Cnmx7!%Y@SiN^ z&$;=_>Ly=pRZU^qO^Wd;4=_s^Y!iT%VOUbG*vBvlH*^4(B7hovIF5$y79QzU$MhJY z>5IuauLN*$VK*B-0RxZoiRJ|vgCzmlCjlIj0XYuZUwfY!w!kR%@L`5EE70 zq=Vf|$DJ(fm*aObq0c6!-U&k}fAGmo7>p2r8nPsD8a|QkTiE>5Gn?h0u0a!jFw*31>^W>SwwLE3i)*^K zTW|~eJO7SBBgeboj;#u)l*_lx2JV5vT8G8e6g~$pC>V6hq0SpHtm10XlZqaQ<~gy@ zCh-&GIaqMTVv+;{w@j$~L8X>jPO0=D)lwWzwOEIf+;rkSmoFH0=v2vLb`8_@i*jl+63b#y>1TNBwo%->7)CrGU*jjQ=*&|=ygpv+(Ix}4&!G$iSp;F@z=0& z>qNsIru}c2Mhq}`5?B5So1pkOg-Xm_XRImxWRH~LkmiE5gryC}Se+fedKmt!KZ5ed z3&R_@KlL#B|6vdN$CM?3@xh=o_iNf`${8GMU}%(C#T7R#tH_e|!t;ed^nR|^*|q-> zL6W4kq#62N-TRi`fR@5)?8Mn0#~SOC zDbd-ffOVrfc_}&I&^j4CmqU<;SA;h~+iK<2;OsWS&XpL-fTqE3t8`uNoc05~u;Jtd zBk1nhxJkP*aag2Jyg-@!eC`+xrt-c#P$aRKgenLx<1MUSTK~*ovoLAG^6PPnd>s=d zeeZ(RMXbt30+!K@6pL<3yHi@|ifke~O?vbcFXWCtwV0ku{C4+3wV^IL2oSOM7qq_Q zKomdmr&PVUM5>wgKKS@lQAuAWfcQ(OdkO&jhIldD7LiQ3pr4^6v1&(GBHl=zAH5joocDF zvigfZ+!V%p_z29l+cxS~KLcM#Jmc)#S6TMIB%>3%4Ta1icJQn*|BD#>qz! 
zxOlUJc`E~6NTHS>1_A!1S2%au?ye*O_*Z@>qp<7>XT3R z@j9`gXr1a`Qs`@$mQ5X*lCKMG2NDRmMi&Vec@z1OJ@YDmaqA_^DI?q`K_x$$&U;gD>@m4r+UTnt6nKSBQnT-0C43$PE~)p^-*xMHZZlf zu{Lwpb2PL0uabbj=BmO9Yb7!#671lh=KL@g<&U{40bkB+Tg-SNzi?@sa7In8Ba{Rp z%j9gp5ib*an2rHZ3(#& zXJ}GjjH`5!J$tCLB6NLOh@-gA-kM+VeohIgLl-&oqSW=^Y4$+?9xC z5*H=wrMUt9l~exoeBxhk`}6)dSp6G0{fB3y)-S&XuDtnFwG=NBE@Up#01AE?VLW+U zRX*WKg;8t2$Z`hSapC*2V=>Z0u-m-cmQ?sT0hrOZ{dA6p@!yV-^!8+uy6b$*0ceSs zlpLyd)x)%Izpp^9JhwveO>Q&+;utK$D(k87!k;d-+CG!~hka;xymU;LJ?xpEK zeaJk>JVSmtfudA?{^bNZpI77XOXi^`%H(9~X@^gct65w1+At-~ZA(#aE;?a5L*x92 zRn#AufOD9sSfQY|@;u+A{slG``lbE6`9lw68ZLWpJVu^TRd(5wXtGo$SpYP3y~N zHw8nhXybemXHZ=k!%1pUX;?oI!lIaW$~F9G)}vV{{e>0}pkADgITbPWFKacKGBH5($~MK4>l1He5;Ryru=Uw^zTQo|6%QFR;FQ5uYCf8gN$-zZ-S{KQI?N)ELM-`S# z(w)>j`I>fYvZJETo&)-5`kGC^cwv@EAg8d7GjzqL&V&YpER~1;{X3d?YzU?GdF9#<5?!gJ8yM*)?L$o@BzQt-;a%qz8*aEU47A8acB|6+(lSnd#kaem@VP`Z z)$kmmBy6r8EdQc?_d>z57>X_&a0psBSOz{KhDK&pe~jJ`bTyJxcz{MIVJzjZb_ZJ2J{gBl`ozW9n(;B z897x{o-ytj8yr9XG$(Y#X5{D&|J-8pMz!4Jy^L6V$5TS|5B0t6&$GhIC55+sq5;xo z-*{j-Ail^q(hKa53MvnBp&*TA)YmsQ2$y-B$4@Dts9LWSfOhODE>@!En2~Am#GWtD za*i5q;f+-jNX~p@ckm;-N=Yi~Y(F17CBW<;lnAW1-`xvn!{0)UALk0s1-6W+Ers`uON`SD2Lj&m#zO(4P^fDpUmc;inr zh><5r)=TmR`|Bj8;-DsX{m5@hHjR-p12Gaa11$$dzs?*tGG7%U= zS6|Wml5gd@$Wi(R)Ck>ZNVaWCb$5rqre~+fc@|Ay5Kzko|N>KcPbCpK0rORk1x>o35dj1eEHP7 zh%mVm(JRnKV62jj=7DYe=%`sN`7U=&3mR5v^DiTUND_=Tw33_{!zpPdIoqatJS5J( zAj>0&Xq?sdOLy_}PTpT}XhbpKe+b%Fs%gD|G`L6jWD(MZ!#ibmvP~tSy&34_-x0rP zAWJWBYvw)fgtXM4fK_a;dtg;?K61D|;&^$eW|0;*}vEuNuk8%+BBf%x{ zhvDE~fGs|K2S=)Y^FfqHls9sT_4zH3M_ACJ8 zTNWxC>Whf1e$*cz+0R$R%s`(Kd{?0fLGAfzgBM4*H6$3n*%UCTNoTz20M^azn>%QA zggXRM@{R0Gb*Jp2l6n(YpfCaL{WQ)ZA?A}7d4-Ob$>2_7{kqAZ%V>aMF|hc^4k2U# z*WOmNXo(3T#jcNd+TOZo7==Y$^MN!vR`H}ghdJycm5J&x@p6giy;Aw$%KUPpBx>A^ z2z?RUhiZnqzyf>f(VZhaW3L-TShMJgWQ!M4gr#{)`S|uyYIh95qA= z04$!GgP~qIl(wfVc z%MdWiJOsLvB4k~ZR_FjTRM5vwUQ$7Y8z_v>mp4KeR*7GzMll4H=Qn4q>GOj_ z>?4D0fBZpn{?9Y`o2UK{v!o2gGwI*{o%~6O0m>DCxw`eD8r4w2j{v6neh|QP2)R6C 
z=g#t=4YgyZ)V@)hE6JaT-!B0?Np~`=v-!n&S(cty9VVJw&NEVUI=;OE(*~)S-ELJ{ z_kXw~ar$|X8rtH((Ua=q`voO4%F@#s>89efnRo~d+^lH@#yIIPm1Ci>fhC3-lBTY@ zS0tgLKi6lRj6`1qK?hJLYOI8=+^JjghtI}F<9iNOYc|V1$fsO`1!kt|96&EQ;XMh@ zIyNrXU`0g}+7(lo<&6%vk7(pv8tG<>uk)<5aGFnb7vhj5bs(hDHM`-iS?x(&_02b& zbjvlc7oRVBFuSS7R33owGp}FUu2f+p)6s>oA-&N`LF-NmqPhV^NsaD=_<$hET#a@* zAkkEt>JIhNUqeGxZq$*gS?41sZ57>vnsF$*JoME+7|Ny_9a=@9jVh5fmAW~@UwY#e z?bLE=FqgZG!@4+uu&2z#vs&~yE0eubab&xvniLseB`6J7%F9vg-p#b4XRD9>l-Y$| z0E)VqFDbR<1nh7wds(`6rnAN03gA~SQ)gSZVyS(6jjtl@q;>QG#-drc=M1bL$!lx= zaV3#|xX$E?CIH=2(2>BgN0{Tuk}ggDouwGw;}v))PPnMO=ylaCoHHzro=)*+#cFbn z2@y=QqUf&uv+}3enZE2I60vWnZ0c>od18e6`Seb}$|>CZxz|`jHo#MMIm|*_sy@pY zgK^)96G}*sff59^G4bufGo!E|-3s&&h|@ZyQXk;IhP~q<2Dd2}F97<{H9eq}7@!@X z45R6ygjy#@0}I=>R4|2d^n#7WGB%K+y#*gv!95>&#*6=@2$Vj}8~6JtbfZ5C#@YYP zV*l%O%J|zCjM^_@4gx%wbTAAz9H1;a7naXR-xu8!Ew&h^`owyZ-$lY&1NfWox8AB$ zG)cy(i+9qUrVn3a3E8x|BesY1gI4ad2b`=9z~uhi5XL}B=$%p@1SE*?)Ir@*US^@; zD`UyzWI<`DL?1Gs8EB~}m7Z$=0e3O+O32EymsJzdbrgBv8|W0tDTTE827C&MwDI}D zn32;(b^(K~GTXrU+-Ogt^28?f+7W8AWv`^0H6nD|g6=imcrD1sd)8`5Bizze^%eQm zN@Py@H8|rFw0MCnP=0j@_S?8?W|!kss>aM(29EorPtYb3K4+(tD=l*Uv$TO>W#0#y zFUj=YdBtfy6vbl4(ie2PX|UzRAtRiN*W~{5q-W#_YX}K#c+&G$=VrWHq2B)O?C6y^ z8AyuPN3{Xl@;=(7+C+|SzQkkT`MUwbTT%YjHZn)N>>9&y+fwnma`6cju=XqWnNmeg z4iQC(!W7HUnld|}*7T#S+H>}tNB-h==$3-qjNoNoy|=*4T*dQ7(Hax_tz?J2*emyz z?Pu7(uvJl>b9kz76xn@tfDYT9Sn`er3dpN|@|0@yz( zKti)+s-u6d18%6-2TVjY_)a3pH$vxq8(UiKkV@6NoJG}zH#x*8tpcT$f_2X%Fbn1` zOm!D__S{BV1RLY8o0%aLsKzE#qme)4B^g@YLP?!0#;Of6ee~_U2{|QU#Lhj!Qdh3Q z(W_&@!M}@GAAJA@37z#JWV4v0wg<%!#2Cr(lgf^h%8gQEtzzI67~)L|fg;qzU$7QG z-Ik!u%Eyk>))d}=x{Y(jB{!h0vBVppa2!Z95u0C!Ab>#slQ-2MeR_}yFh93AbIZ7e@H8q2@8#k3_({MrgI&@o0$$r0HN zDhuChN(+eZxBmm4z)~O5VhWkUW3os2Vv>KyGV17vXz3NAX*br7XShs^oJ}{DKToOO z;hl73p%yA^R?)1K@}73>=6USJbK_)z;bkL?Wa3kLp9-vI>%}IaaP49c9S4SsEhLg! 
zdcPrPrXO@+`CfQN#@MDGG`I1hjdK0w$dfGkRhu5GGWcESk3x%t<|2#X!P3jhbH^0p z!{iw8)Q?pmQ{NE<*m0<#0xNwLx9uy`5uVAbfDMP%ib1-3ivydv2kH~~89X_j)p>md z3W$LS5?DG!=P44Hw$hHiv)txkh6TQ`e15Dbtn}#%^biw{g{~D32*f}y4c0RiQIv*S zC&Do2OP;e$;V(d`lK00>Q)~KCq+Dsaw_m>EO&?u2PMeNfRo7-+_Bb}JsmDD*OR8kp zcr6rSVTSURN|Fp(GzP`vr2^%&V!_E(!qu&9Tk1UYsEG#Ud8~J2dXD%m}r#C_+8fDDM%RZvY;j`A^_$JcltZjzRFLs@M zY?3ClJKGK|_UnD08+ol(W6hMtu0)?<%&3j0PV`~dsIMPmw|sGwxp)-6N?GJcvV9GP zJS93o3rYJrkG8g4q3Ju8p(1|h*_kIgxpdTU-R50}QkCPUcm30+L~`{(2suJhN~-T7 z$%1MI-QpM>i$aiTPswQ4LxOyp1=vb(ff`9*K65h6_damz&|*29R~!_YWW#mm1(74G zm~SvCZ5B9d@YyqI_TFQ%l2sT7G^rgGI>}OHsyt(V*3xloY7OB_N_`{`pa&X9yTC%^ z;v+%LAs@}M{(?|VfhneYctZ>8C5b%L&NsX}sG6vl^?dMec?N#-E8E~fWT2M}S~fjh zSo|9Wqz|^>4^tu(W_5eZcG1RO$Pe&bbKkHiyFbA~LF)%kiqNhK(jc%(?TChtBRpT1 z91MobSmrtq6q;x}tRTY5S9Su$UQ&oXMj$?A7``FwDCo99+~!d|wjFX6mcnD{TZ@$gL5 z63|@(bLURsop3)RUE@`B7qwS2&tKE#eBX_N$%wXQOnqo(_kFi9=2{(u9F&X8sZu5DcIQ&TL(QiWw*;3+mP1yP%d(mH%qr2yquFFR zk)qmbNW}3A;sB9-)mFrk89o@5m#>-NYJpgZ0^;CksbN6(k9{wRDTAbzFM>5|kQAlQ?tmSebaLtufbv*yvGd z_L6vg`jVNNq7i>GvxWqbY(;zzGv%gO26-frbnG}Y*57O`(TJ711k^=x+M(8?KSI*bCZ!fsO);o9S_*NKJC5#C zR=a&vU!jML2qe)XSqyQ;mYXS6DckWGH*j4HAeaE?169hqt24*YxvAmAjl{*@OhC-_ zGp;fdPzW>$+Z^X|77?@fCg>_8Pt+MUs>`iZd3BEr*0okv+b$$3Pq*$A>8HfqRE zasv+M3T{^wuP?C@7+2V)Z5tI?63l()6bI@3-SSdQZn!;Kv-6-MT2v>lo5df#RF;_x z$~jTCrxLnRzgVo)P#(AcZ1{2Y$^20~rUTxelZ9vLBd?a(O^G`GH0TC?F2*ale zt(zsUy;Dg!Aj{Xic_KatgP!Oo`bJGp=WQOz6VsE0&_xrxt%ADGV*JRFdIc7_DTCRg zbJ!=<4)&9H*s49;7Yzvyf33@HwNGd`iA)Di<06zkYs^p98m~0_3Kf>rbo%_-V$03^ zK~AP1`j%KadV8gurYH?sJhp9)go9S(p1`#(?P3|EA8zw|)AmIn;R-(GjZw`=s!(-e zZjIm-wvN9!HH1GLx*n2fi^7gH|(IUKjy)$%5{eHv=o7X|@+qrE=bN*MQ$r1I%Sl}Zm33Lp`1 z79BE3oZ?&Wq^V;>8$=LuNWWeb|HANx@A6?KfPAxx$B8Eb7O)kTsYN3R^frEo3; z#6IanjwofdWGH>kFi~<3XDKWb{tTk3v>^^Mlj@{3EHu@juVhKP4$f zLgGJYxS;uYuwaTQA+JgA&Hhka5I!=<3dbFOV!KJQF+t)G)t?CzT>(6u8Sl7Pg!rg- zGtipnZ}#JDF3CS8J`&ZUO05TZ1A7dM3IYkXeE2AvJ$2}R-;cG}i_xSNIG@$b6)!it zd{*>|7^jBDP89f!Bs7R`Qv6US)utO4);8XJdKtPz3#C(y`8@0E-FiS{t(5 
z5eeyp_s%K*h>M;$oIy?SV9{_0=ltlxQ<{Zd_ocguMfU6HuC%&Qay-KlOC@fU8JGm1 z$5*t*ucoiPg*9d6W$wXiKNnUW%{18PgqJLfx20c@8qimSve{D5Ee5Y5re!WKiNisZ zPx8rJ8X8=Z6eFORmBgU_ zwGZVRQ0z#;(NAALU5I2)rzN`t$-T5B7K0Fh<+3VR6TJq z2#zuojnq4VxN3@*;h{2{Y9JFOuGm?Mw4^%<`p>?;G8AahSLiu=wfrLo$eegPH~McN zAWyd+DYa&RIY$paT(jmcY?^9ZKe976nlYRzlVx|yoQ9%0KO~sPZ+*x^6zb$ z8oSH+GJiZJS+PJrs!K_%UJ)Tl-(@mbrWYx!N)?mgm-1vkYZ9JbX_QOIvb=k`F4KA^ zKX*F^8K7^CH;Q%Cccon47#I^|HxM~cI5KqXmd2=Y3>;x*xE2=Wt+{+AZQQP~%3!Z8q$-$VRci( z2#9|8X-1i%^`YH#7rO)(y8z2ATz>gVl(XETzmL$-Z)5nX+=KYFXzSropi?oOeBWm5 zh(DWXwn{GuYon-k-?Ag_yUDZWf*sRpx7Bg#m}+8{g!noacj3?8YPWrupxz;VA5s|`@{0GKY^s3tFYpL{$>2ojyOj--p zAUzn+LZbB5{Nz@D1ZauF6+(w6KhYjHyz&D+%w^IeF}$N*KhfC9Cb5RNTx%7fy;q@r zj4$iG8i@B+mfe|>!>M#ysF7!`FtxecUl>~>I?Nw2-l=zRzPyLH5LzCc@5@a_i?dXx z&KS_vi{0$-VS#HB_uD{lzczxwm&&U8{~oXw9P34fZdne z&xu|d9sn}a9;L4C(iyDjA?=O0o`~2Vx|}0Q0QHTpoRYbJ-Tuqb1?);I_ZPo@6c`g=U%RqnY)-L74~2-#`&RrmQ0ljdZ?{BY zGbg%8yo#eUZIB{&G_i2w!6`J(v$vT^5Q$lpp9Z~T`_MB8#&n^+@#bMShYjbTwvj`z zO??^WH%q7~{)%y2fhYF*Vpw0?ql_~zKp-5v`g&ZQwe9pxv z=cJ`}kzuv+g6I%7K)_uJ^+jEM6VyCu%U5$laWC(p-=1UaGKe=i5h(RPP1s+s zeR*reaZKF%)^}iTU*wy@-~I4%REXsA-N?+qDR!Dt(ZnZ~$gV6u5@i%s{5@AEllPXI zyAIF{Ma!|u1H`E;*v{D;1We_M)Fi56bCO)z!Pyu@WWGjrCy6h)yQMl#=X?F3!5XaG zY5K)AN@dsZTByikmI?X^aramLsj4WXc_eN=RS8yJRRLB}G`>rhWgL{RWMH(3ep&#p zzkx33TK+sE*r!kY@PFbhx&8p3%pL6Mn3;`e{_kpQQ9TFKk9{$O{AR*V)_>n)%69#_ z$4pmQ>CK;5!%z?3oGHRtJ=Os_tPWu4o&)=rq$vbdt7d4>To_8g0E7uTx*Rw;`5Fn zHjy2p7~%n^<~U7iNTZO1dRJ;dac~sgmL&3o z;6*IPqT#cvBO>xp3a{ZQ^_>?b_aMh=tjmD-1#b%cNeRX@QukW}25zg2@hVxI>0RJ+ae-&tj(u)1GEb>~B zro6a?hDF)7mIah@YHp&oE_>cwBs4qg^i~&o5z;n5kH0sGn8+(5&g&k zj&SB_8};R;!qn6`MT$Grg$#bIU`6v8FJc362YxzYW zCGDDs_67Yn&0|!9)W;dj;O|LKXwLAtifsl*mGI!~NY-sd*Hw|rHA2)`2gi1{7xl*9 zzLq&H$a!B{ifc2^K%mwxHiLW?X-_J^eR1)9nBi$jzTJz1VUbOl_KuH7tr7h?8MwW| zD^lNNs{ckmHPf!DbMeOf1ZPoiXW{E2B^6ZQH$)ln;}#4$IV*Lc*IUf+z%931<@_} zM_(i21NFRO?@|(wBMlWP6X6E>%<)#`RD(v9I4UQfLiD!t>%TKE-Z#(=zd8KWjTVtH zsADn0pkK~7-pifD%S(tZ3c!c^ZYLaWJous&ncEuwDHA9)hwze<3iuK6avNMbckh$O 
zi^`$6w~Eh#@TF2S>gGu&Su?_B8Y0}uM(zqgJCJA6Il_<=>%_|~@Ab(o?dD7;x;x=h z1;E-s=Muo0m97)7O%VRO5^R>Jjh8rGIoTzGpwJFPsDVVPmR z`YMwhO~e!B`3@f{+!ZNmRITVvNC*g07;z$+{0e`lZ4jOsB^EteeRd!EX>;@|?lDu7 z`vtStHK7tAdSPfmb7cMu(y{}$=o$7ERp9A)$v~6|*2mUnPnCv@A7j)H z{Nf{wf*Q?w`QYceoLk*_O`pjSGTAQTSrNExZsy_Jj{EFA!n>qLa}WfFcRcWkIaHts zyGW^f7AntATLyw?lnq?jmnv^xd7+lp5A;Y(Q%KcRh1a`bRFT_#rOJC?JF2DPsW^K= zLzJLk=P4Ddt-FLXx0*iRV%jX;#f7Gt!1BOkR4ONoNu6u3H7$sdlMA)p z+zJwk^W=za5-oj``X&*KSLCL8$5~mAW|=AXie_UR>rPLJI`H`*)u`u7g&Sxc>Mal9 zQNh-)4HKS3Dh}-ja>rynV0a1B0<3A;O{|=}lB)P#($qcZsneo4*$I(Gli z!Q}p%4mL*n&kcR zf&J%@^8cG5nxzP29s^Ehw9$+EflL_Kw9ces;0xh)i91@YD;FwBx}q*<}FL!Qtb zc|5)p3qKlHzuho9ebt5kg=#y42U~>YPOx>Lr9#8sHZw_mu!YdUMTVpav0z?)$-_F+ zq*YZrO9c;yq*82`M)K8?^T@v5=z6YZ_AqQqwub9SJ6*7wxCy`Ro0&;z{A^q}1_4aY z%L7gK98WVRV2+P@u^21w%u%8OjH|R41I`&1D62hM?y&!%YuE2;9oKz@KKTe-{vSij z_iu()KvYWE%*f?$mPtv&7DEO7eO~=+csjE_UscRd`ud_hQALY7k&`-UlI%1+G)*fboa9W3GMrROC!x4NkgPR`I%r`J&=0bBd-P;z zT29`SXvJ5P0@jA|A?VDQ`}CWQpBF!WI;=lmrl|m|`q2!dISKTs5^%as!T{{NIwXI? 
z-p(SBj_sNGFg&1-9A@3`CRw%JkwZtamY?EG798$+iqzV>{%Abuql|CNZ(Lzh2U3Rcy$S+UgE+5nWC0@@xmN zm)jszk_o2J;n^0Cqa@8>*)Wi_Ma9qo58YZLb-n^G^2{sd9woQM!YDP27UkwhAwH5| znw$4i(alSZ!+fh3H+g5u@L{T17OA$%^0d?#eQoN+d>XD>pwm6AHS97>we2cwY zd}8AG9tbdg7$hq_TUk-L^|>tlUAdj_b_|t4l$rUBX5HJOyLd%0oO_gO6JF0fh1|X0t&}U6j9$AvuGEfyJqs{>5XYYsC z`Pmr?(jeQt82~*IEQmAUHmtp!$6b}X-WF+g*g(W?Ne}y^o9_~3Q1>JjZZOr(7$Dh3 zu;+l7Zj{bsxx49^ZfSkoDU4PUxlDl*;_3JIMIxb0t5^hq?utZ5m-Z9KdjS`ORMNE| z`AtLor3ifrzaWD2D}|q?$K;7ejZGX$z%$3UG*0K3Pu2wa;;T`LC~nFCSsnPe7)c zM&$V2XVk5TS^!m?9~v**s-q#w3NByu_JsQKNMo6o-Ovw zE47F7^b`*1SL#+m`K&_5$ElO&6q9e3d?LYNQ9u*JDCOm4R>B+-OL}Rh;a!k6K6~@2 zqRo@YqNTb;UrH8_pLjWme13P$YRuMauS~b9$E&wW6ZFM-C1ri`N49fFq(6f?zd{mn zd}SxarrRoxZJ;(U=k7y>MmfUu%S6u9#R*k65@d)>2oh2oAKmR2Wv386x=qeAI=Ny_9&@Su*KnZL9iD*#8VN` z8XN-}4=BG=D@=W!E`igcWL=&fXb-TW#fk4hmR9^bFm=I3^khSe6p-}u(;aZ8H-j}3 zYceJnX;Dg$G=>mq!t$Oip<|hs^p@Qwi2A4IyqsWc%7VDc{Mr6<09Q=qZa@}C3wzM) zRm6lM{0%t{AX-X?!hrU??f@a)ia^*1;9Ng8lA=Rpf#i8>03=2EOnA59WBa^4)lnNu zApNWQeaDl&hiZ5tK$<}1@f`$&f~ian#Sm>H8b;|e;k`id-zWK>R)H_+7;wG7L?C{-lV;g_t4ShpUyC+dSop6nP1DUAq*d96kGXA||%i{E+aq!(| zTCm0G9MKtRWA!AsHiJ%Ep05*e%l15+$= zLskTZ>zhSTh9^WETpX2pzj64nVE!6D~ArPO=dg8WHYtook(!|%3L zuTqn;1L=mFzXfsGIy-lh5(%l^T~LW*}e3R*^BtI*^v z{4uM%6d-n3-A>b$@h-vL=-S{W4PT^#@`C)hR@EX1kx%LbntUT*T-!Y(q|*+qPRSBC zG(Psaqc&eignx`rBd~uOp=AHf)-Pz|^dZOR zcl+y8(dMtu{|sEUok3wg*q8sHUT`b6wsViV2Rl*Hb=2e2j}uekh6i!C{xgjNZ=92q zpqe;1-q|y_nedxRQ9;mAOjRt&QkTokQqnXuHBKyz68aG;@zqRTx>SM4A9g5cG8|gJ zkT+pu(WUpzykedWO2jx&SrFXsTU=7(kz+ z_w_%O=$UL9?g0O0R>mLAzW03uh~L2DKii>mXkB$lTs=MDC!Jy~AY2fqH>UIPcD%v?oI*gD3H4 zjWSQD@X&~(An>#or^zzES`goo%ok6yG559H>M*2Kt!L6dQ4rKY7#Mx?Z)TjYl>Fh# z5-`~6fgPc`stSjwQJ9)^uLH4mlKrL!-4c?xUQ2RY%8^s>gnYwjpaidd@dyLq|AkzZ z*Ok}So@sUh6QA2Gyc8)1ojK~)gmkA;SJe3tHR3-;P2}G!JRxgGdp9vhBYQnZ8~eZN ztzJn@=3|-OJM|&avrq6868(W!mL7J=66cHWtsBP zYiU`_m9D$GISy7*-x#E|@I>vj1!g3HoJe2+Il_8;wAb(XWT^BQBi-t=i7>H`+e1nU zwG60NJ8WA)pAY!MLMDrm27Z{DX>^N2h51aR7FOd2CQ64f_83YZT9J6ezVC4Itr{Af 
zEGMEeGCD9ln!0HnHl($aw3!iF1x^>6X>imwR5Nl26$q|y*oS(+5(_rkY0PBBj-S21 zc64%~e~MTllmS6f3^C~bup~*>XK6y4(y)<&e53{`OIOA=f3;4M#y(G(&Ijq{8EIoX+VB~X>2#%>#oG1b$0<6GT zGGk~0BT#M*Y86YByVAT^7e}VBEQ}(_Ai3Kng9%65N9XrRVC4VM_Kv}swcWOGcWm3~ z*k;GJZQC~Q*mgR$-9dNEj&0j^a?;Pc_xIvE)u;AT=hR)b?mz3-sC8X)&N0WB)0`W+ z)J;S;0?Qy!OBRR;;0P&J+?LZfKs`W>H_W-mjy3sfGCTu@$UQ1upL#ruGL%8dIE_;9 zu@BThZs(qjOiVq^x(|AJSGI^ZpOWM-iQDnm zP0Y5$@;Hzv|HKkZD^AD#vAVJobt^p-P0T#U5&i_z9)=|v75IVBB8-kNYkPX*sUiE8 zg8f#$F!5(#$WPbTs<_~}q1z0;Tp?lY4Gt)H`MQ|ALT)#>yG?DV6aGbKFJc8u^#ET$ zH*eQCZ+?)`&atb0wIV}94($mR*A$L7h-N#AXT(vb$9M z_|Cq6-EelFRU6nom<%w;KLv?DNwfcS!~Jzu6^)(#StJ%JTRI{uWBAC_4Y3ZnK+_0? z%h{k#6_D&s0ZE5dCP|)xHZ)=h*=JN4+qZQY1zspVb;-i@&@)0sPb;KdUKRdw%es8> zzYO#ZM7-q4K#*vIlgTu{&URc|zSg^)Rk3rW*A0Yq8gjRUf60R7Z9i59=TzHzI+@5EkK(J!V zk(`0bo{2GvzBlz4kU)nXQM8>+m0Vn1FgjxMBv9H?-8?S^KkG&oHTB4_y*0BbHIV4X zsFcb8nl3^!fI~MfozW+5xU?r})Sd1O)2y}Qo@q$1J>9EEUGC>dss7breO4Iu|9s2x!J%nP~a_Ke-Xc`(R#bg zb9tLOI>q|gsmo#Ts|$y%PGVt7O_(>9J1P@ac8?8fI2iJ|S!z;jg4pB%GR3qVo+k@V zCqJCcYOHyQg74{&<(}`1n%?SB5u%yS*Cbh?9hSP21H=Tyj?GDoYAY_8!_35^()vaC znOu=?y~H)kwrP+2Ryhxe`YDTro?cEbss{9guBJ1C`=bh_uY9h}om$>)Z@lg|Jsn-P zB=-E7Y!cf}NBYDHeV4SgqL$gu(fP>x`{5*et_g_|o&+QzpmzF`C$nB)B=&&>zVjGk ze$^a;OknyE>84ulfQsddo--z>T*VK$cA+I!oKdL$YIyKOwotY%B=9pR@{J<;+WPCG zI0*h#J+4ZvVBZ+3Z<}8T#1}dJPrw0w%a(pLR{{kcsGX~*of5DYO)7<8wSBN7An?76m$jP&yh4C`~$N!tAZ_TmJgHF0WRQinapjW2cycEKQu?}z*)Wv zl%+e)D2cTx;`P@ig4?z`Vp_~=_nr|XYXa_!L65AN*Iv;})&*vN60Mvg&inFZyyb&8 zLJkA>mVdYWasUO85jf~xqXfxN+N+>)OI&mIIM)zJ=;Ug;sFCsGI8ltRi$C5+REXB` zeArK(EqsCe{A!wfg?=*UYs}#*DDl49GZDH0ECdL5TJ|neLU3XtdA3mgI+8+Q4qiUT z6{B6D;b=qcj*6XNRh8^saV|xxFZjt4x=>R16;I=a_uzJoD8^SmCH(as5eMNIY*(ks zV@ZM%8dZYUVEYJ4yV$G_%tF*z=ZpBFP~>aBg?($4&~$_#9(N?jp$ro z(bs=rgCl>l!OQ=1Hn=7UXl&;LGSB}f>7%%flYsljL&^5z@qaE?4oeS*{zv6X>YvIL zQ=KqHl30i#aHL3>b~7)$@%HwaFZ-FjTU#>*B4D3mjh{He+z+4L#MMiO_)zHk6*sIA7XoKMDA%GxXLhze)7UJ>^!kJ^(Ex^)z}9{r zyguT|W$XHhr~zjNml={JyXZ1W?>4}P&u(74Dku}*Q2gK*5{W});V{|?PBPc33>YG> 
zBRHNuWjr!6zCrQz#qbV^VcewPddkT_D|U%h0*Xk9Z#nvh_JDftLwjJBf=<0>F|MLd zEX*JI{!j-AfRcz4F6pM!0Bvkuwu^9*XN1FY*#`YxoNx<{mwx{M3hR$R34grtciK0S z#{XHGDE*_w2xgX&oGY9-EWGo@LKEO1528`FphsN!Fi`{<7Pd%>Om5R`)JAe4897-J z*4#@brGzBDBaQSP;@jg^L~R|!(|D=xI+n??exLQxY-nIpDImU^>pIAYSt@iTV%tOxjW&3#D%nYPMFuRxjET(+YRCtx`+le^1vJ(s!iy+i zj>FV{odowrqb^aWCNMF5L81D_EeDMaKU;ZY#U5*77PdPN63787G@0u8-wB$-~BN;atX#lKalT&x@BR9td4dn(NP z`m$erL{%1+6=J~7U+iPeqw^y$iiGn}35oVjXq=0PoB^h~!AVh7v~e+%Qoy z&m?mqzndbpS*&pdeko3oFgmDU*%|eDgp{2pv{fut&Z2g<)?{J!hkSzdoB*NYJg_}0 z2=NeqzBbY>Y83Yf%zxJVHk2n@jnen&4M*FEtxL80W+8RcLP`;IJ6Y!q2<~mF|8@$p zvAdrpPrWYh>jUU0@1%=g@~k&I9Q=kB?3v&ql|FGo$d@i%{>|#mU0ZTVv5NU3+#I8l zQC90I8PilV&6o+y7qIy_BUfb4j-7iw9P~+v zxa}4p9lA;)nR(scXDShO8+Y@gB^v+7X1&ngKU4o!LR6|~D&i<3yp8E#xVU1RVK@*$ z#djbi^AP}c)1|+-BKfl*poFX$FFR^KqzMpEoFQ%DD%38e-I>Z zze&rY2D$6f@aFJt9X~v)rYkn}c|IQa_Q}bRJ5nSPFySX(8yRGeZC<+)XU7%^6)E;W z5GfHUnF>J^X>ZSt*@aR>l-gCHwnw}IpKuolVR6By_9EBa4iJXK493dPOfNHSF}5tK zWw#IUmu6HdlKQ8W$Od<8^PTa2RkjKxY#{=Hah}dNicX(VHst1LZW^kW2`6N( zuEZ`&Xg&z=B@~Pg=O3NO)DIpyaGG0@Fp(z8Xr{JdFIVw7HIL`DkI!gw?x>C-o9?T( zpIu9pZ)SQet=BOC;f!T<+{yFMV0y6fv5eA$K1!E~cT(2@LoZSQxTsNgx7Ur~axUzB zmm*vUHmgu-=`#BTTNnSrQD(9L6E%qxnnD4Ow$A7}s5IN2Z^BD-jJG8*x|w{GiY7RX zP3JB;oq(L3d7?}1P9O(AU8*~}OY9}YlSgcy6=x(hkC~oNb~9B}g^W|is2zV1n_GBu zsn^|jqOtXDUwO3%F3+2g3Wg0*hNICac50gmGs?G%Ko$e6d8*Z7IdiJ%D?5uFt9=?$ z-KK+9v%6135D(%#mn&2n^&oQs@s_ZeZa5I)!9{yobKOfe>9BJZ;8rG=d>bm+NUN6w zbHt5UU$}oDbSQKiIXKRNPxi7HxoB555&A8^QU|4S z!Mn-QW_-!1xF-}E^EX{uz0GX0yv1N%cVWpGs9rR;#BY)x2|R}7@^##pY{~s$P76h3 zO6_-JpgQ$gGIpC*=T>L?YHod{cYPsqb7$Ifzt`k0)$h!FEY2j;Yms7*!7_16;n9z} zm}IL6o4a-c-{(V){*e$+j!YNxFhaW?fG|l_u-)U?Ac=5`9My~btallGzWJQ27ObLy zYG1G-Acx7PChktUba3O{@Il*{i!fun!~NJ7o`oV*I&-5`4U709QZ9%1!&^wOU`w{0 zPwzZ&iM{f2>lSg)kgoVe-aARq72Dv0)#C(~U%2G( zz7$aBs;GCf)5)@c){O#>^i=mzU6_Z~*NLC(Eeon61XtH!?BHykc zaR5Vgj-k{_Ph?VS5FOzK`8JXspFiGi8-k{R7vLZU290>50OH-ZRq6JMD)|zSuH8WD zBQiQA^@7dZ|5Yc)1-%jX5_skj;{9+2wbeF)J}ndp^{sq^SD+k_5poezm(IXo%Pl() 
z)e`eGJ$p)`3F;Pz;c!T=AT*xP9_IrC4wU3{&3ZODZBEeYH}RK{)PrKax%#6{u>514 zAn`XZNGV$zQvvtiPbJ&m&;N5)yIXkJ_dm*NkCc}{xZ2JHU?~h>p-5TISwEP#H_m8iu@Pl&4{+KgPYsl5F z1%V;p$La)`tFt}94CtF+2EpoHN^i|8Q-a+Khr#SWb0d||c_yOV1y}MO5~M`UV^&B> zQ9Am0u*T!$c`Ru@ei@nBn?{=#?>v^qbpLh#Ys9&i{l(b<1qo^!47Hf9 zlJ#x(RoPsX7PFAdsM434Ha(*A438srCaW;fmPwjiqslrL3muHvbgLGO$L$kOf9$$t zuw5OsW~`TTc$dC$>&Sgm}yXO(vmk8da zSnneW)y@s&;ipN{APCW>!+wcZ#}F&Qq^WPyyV9 z0?lI$iFW(x_Zo_SIYhS3s@D#YNQ7>x%v2vKnMJW*Ag7k$iKY&EFnO~X%(m2UH~6YG zelZcy8-Y4bQSP1)GKf8$=nn6dzY&M2aRZW-AdW*~pIhmG&NjEE5H&!x1TsjE zQYD@mJaALaAp^wpLc&N!)(n0jqrV|I2sc%?M4fg?Vsh=MD63Mhbw(kXyG-ZXM6hI$ zVl;C)@n4)>`TkUQv4uHXR@V&T@Xk0XI{eLo6W?l%H|yN3fCYdlhj;>_T!c!@EALP4(F6^@b_~VhxnK^rsGoV_XTCP4Pw*myH*xYsftKWY zhT5$7p9LC*HF@{GLezRUurWQv8BFwpGprS2%)nL{^8~hsjikvEvXK(GPV%R8UU^Hw z#IB0p4SVm@Wo~jinY>Dg+8EFegbgNLBgad|w8w!NJvBdTYZi7VK;+GM&9QWz8)0$!2DqlV4rQtP zB1K*w(|f$Q9M$`l`tFwqevs^&R}IY0Z)~psD*%?jE(rgFCD_rKJG#CA*+~4?RuH-G zGLHp1&E6WNeN8YYt1H%BiA~GRFIAz3V;gd<1obWp4()k;op*bg{`(j)Kxr%aR(CfmLL(#u&5W z`jc&XQQp3mj!;87zlmG6Hf6slgpMMWE3N-fh}D4M6Wm#S+)ICw|HoR z3g+_!sUhzY2S=wdxG{v=s=Ta;krTm5i*0rI#Ly{jyj^O<`o<1Kiqp_Rzr4)aPP(nQ z+j5jNWArAjjdl`3T+WbM3?5|br&?XvXyik~fbY(%& z9*f8`tfvy;3hqh4bcsQFr+S7W(gY6$KIe?&Q1C`TZlRPSTZN$sWjtKGfE|JPSxLoA zU{r)D&5q+wPNxp4)H7?M2*9{>=MLA3s9k#+yNrr0HZ$nWW1so?9-X+rM%#^>~K-1dc~+-u`lKp zb$wL@)A&$HE{g8dOsn*HB9%=5R`Q%NU#HzF!4+4V1%*cb_$AuXY9x=+AiyU`Z=e;= zh6OJQoHvDd;S!Baq2ZaEJ#u*aQ{-PM*G~{`_MmYTC45-{(r!9kCRXw_NeSo(ay78a zsEI`6xw`x)z(`=Iy$Gs$1i3kp?&&{!B zZwRT<=}`*kEA#+VQF$@~8eR68a(409KJdjLN|g=6NHSFHf%mA9o4}tz;fd6pWV3X? 
zu#5tR%I#GAOgY9tX1IiZ;$N;U+LDUw$t5?R@K0b9;tQ6QvSDy8DJ@HiOe(FaKKtO|C@c)BO*lIsx{YMEB$yUY z?Ca@FBl2)yo-JwftY8J8VY0ZHrH<;E@iYXa=gT-cLE(3Z9-UO1Y<1lz?$aBWdhw4p@}|*98rZc9P8bks`X?6q!t?uELk-=-K4N78!@5xJMI%yXN|> z3fXq{=Mk7I#(H=lp5pPt+svUa@7*U(Ips^Y(*Z+KD+Tk8!^e+6KlsqrVA^M=VCIZA zN95qtxfBmPV!O&L*e(U`J793wDEb&Od)T2620%zDp2W49IO?xYItLQ1^C&+v?wu>enQcrQP8_GR!aH~iHsgjVoZIr;&(vNxPL+d`oSYT;cN z-viP{$TR+L`o&k@X4CLv9>diid$@mB=<^S&zkvHc{{LO*lO^-pxrX4axZK-Y#vO)F zphidgTTE@HAtZb`v7yX5qJX_+vp{U%QyJpphfiXIb)k3E_r2ZSc5k$Oym}X^lf#47 zMe78SlS?`w-fqP=^zo}Hq^jc`N}s|`Pmu#7iCcZ2)@3{W)ZMEc7ffIy4^~uldbD1k z%W5LTn-sdpV1}^4Ii5@dQV@f}ZZnss_s-$J{@$Pyl6TsT}FF=IvjvWL5tt^jKAKH54YjWd; znu;rJ)BJ({d|u4(cK)&3`ft36wsJ47K7wQCAM@6Sf``7y|rX7dOipB+R1TvOu z0*IP`A_8SfJgoL%R_a>9v*6W&RMqPZ;0L*_qR;T8Wcyd+?+ow1yu5wAfLeMx`?4c| z1!7FqHIQUNA^^7RlHVg2rzsKTd2HhA!2_DL2IBXF(3b2%Q11J(fdGN4B&^*H*E)A!aoky3b*6P< zGu!h+8DF_ybp6ujuO^aN5o^Hj54-)3X#ZWg+yCwY`9n|he`j%#M*r8QrU=;Iwz$ar z$>PFC4K?ICL?;cT-W5XIxt*4hqOp^ipr)0n@oSL4?@UAl%#iWK*w9GNNYCKYXAv|J zFcUB|f^VpUh6Z}TR|M1_nn(K4p-$-ET!u#rhTFnFxD5L%VW)!+bVL4#?%(Y^{WrS* zye?!WC4TQRn0IQ{rtyY+`h?$<5{yJliGtV-+6P`Zj}g+P)wF1A!g@9DbpZTCZmvQ| z?|Jki(-Ut0W93&|)DqVP=_!ucj2LlC0Xq3JW3SsR+MRB>Sv$ zNxCBT0<$o<;m%@CI}F1R#_9y19~&fA{YUhRgmN2|TzW^30sG^w^Tvas2@~jx*vP!m z=-SkCQa)-QpYHUj#GPns2VJ^pZ5`n0vt{qcB_Z{lVG2Rx=T9Q1x5)!U2-HMDz*dr7j9W}fpw2+`liL1daX|%34xCPqizSz)>Z?AtNS=wfOCr!bij)No&L+YXrXrbaDjNW6XTE?DUndU`0pMA=hzX&Q%O*oux9Hi7 zm@zb998MT{V*#g*4e;NQ?mgSM`ZuzHe?*r5T8AL{;CG=M%J%;)c)__V{$`VkECnf)#R98YpFFNqQJIVkFUlHj)QSYYw5} ztj3!5yb}OfSGZ_9xWorl?ewPF&2-mJEM>j6VEPhp_%MeKq>qKAN=s3H>k zf-NJKIys~lx<#K$GAvA9q@vrUlA?>?el}}#CrHG>Kpzm#;3UWHLn&go z_=`An)N5DxJqlfoTv+d4*m~H>=&7^2&W&Uq+-9q{Q&KfJk%jdFRKEZp~%jbqt zphAj4qryY#7-J`=Bd{5}51Fnpl&R44AwN4Rk-2k;)sl6%q;w;kiR6`XOk1*R5wA=g zHUY)2HV3g+KU7*X4jOT}3At4F9m;iB{ zuFAl=z->DMYWP_TJg)EeWC2ez{too}T=6<5%)Nb_8sdKg{67tke^)o8L66J~q(P&YlJTFHQhwaRGgH21w;gMAhM3r2WbS|b*4?um#~jbr zT79jtIt?-8?FXY8*{N&5#>J)xb8-}`T8Z6N;%&sE#T=CMkfU48krK=CmdgQg<%X2m 
z$QK)+s7X^7ElPL8Nt>jHiii$|sBn_=KBWxGnQ{y{ovcPMR1268eZeoeYRrk@1EXeF zZd|jjOSaLZuYqB1($rd@io>{;$3$*4wd#^ zES72^w^&{!GYH8AV9U%(l3OVS67f+B75WIp%t8h1fJ!gL@5=Mc_d)p;VkU)z;pi+@ zT`I8l;*`XTusq*j;io5&!cu`n0N%;W_c6i0HjUUt=0W1IvU`2tsz@5sfyQ5Eu5b4^ z85YonZVZ#W=yF9yQs{j(D3cl6PEMSn4VNi6*l@|ZFG2Pl!>=Xwc)gd3aMsP2bjKDD zIc!%&$c;#mj9F%9p~iMtpU{5zz2~bN;gOqZ4HsTO{H(UDgs&I<0q^6$CO4dqcg4W> zW;kwgyIX+z9`CHcLopIfH@Ni%Q$2Qp6#;#$+UxRkvLQh>PUL#58e9Baq?eJ?8}rrt zvlX*1u*)SWz>NyIbndEphWQBJ{6W+qD zKsqn`1P-20Pk7nl)bNqHVksX3q%iEPZ20au!wyeKN>V;+>3p$ho!384S~R@s-%B?(fiA8I zs9;}UWl|+PKAod7-|R?~wt`n-tli&0*v}0gkE8qHC#uC)+Y)-aC257I%h0uLhOA5U zg%kmKeS_taW-MIXi;{LbuO-0V9=5s9c?EHO)72i>H`M%v!bYPHg6JJj301aaVY?Na zPA%k_nkSl+0#jQlMsfc9*(t1(m>kHyK=NW#jxw&?Katk2;@kSRlh z)rz;A3j0U+p$4I3tH{sDBqk}eT|GUt$N5@HH&Bwg8P;!kTEd5MA=b_D`~*HmEQlS`MxI9r1%w#hAdG;7$$?H#3$4Vb~Ih_TRZYHDn% za+xhZtABA*Kn<@x#xMl{P|(;4pF81P=0i(wHF&CMO)Tr46s+k z391cfR3wyha#YN3hzh~Ub68E7+2)6*azz}|$6n;3q}UI}CmAerdcy7jnw!vhkC+zFJHGeo((SSbO1D zadTti5NhosWz+Di4F?x_HCMrwewL7%(C{S-$4T6>=k7(x%Sxrew{Tz>b$co_xLKN7 zgG)Q+x)v)m;I?SOd}gdLGJ&|>cscEkT`VOo+x@6E=px2YHRg1^3#ESQaxM)^1`Ol4 zP*kc)=)P^0O#P+y^n)ipX=VjQ{0Q~9-f%VV+d02R5=1rZm?#R`CZIxGQ%Au|mTN)m zWGS3E85_nxOJ#O6GPE;_O)++cDF1zQYQp|P)@HoxZp89-AMZxgav0_LNttmwuPS9# z&(q}e)+aZ&1m)&gpSsAz5;1_q9Ae;%G&kb1hC+QU4^BROoG$74V27|u7Haskj>?Ch zlqEM+`B;m}QAS~YN%lsK8Bs#*xOI6|F#Qj22wO3g&aHhL_t01<4j+PTo_^oxGNufX4SLmX8W z^MN!P!wqZwW9Kf`hmu4#!|bF=nD?-M+3I zA#b+Q&Hy=2raNEqi92&4LeCPT&1v4MSoh6|ji#$HBmG#++c}+G)pZHe$C&w!T^>W+ zeO$@6o1ThVMq-XcjN5Y|X{$6Ja*VVomQyi(2{@H0kZDI^$tv#PL=H0wwQ)I;n(I5< z?Gz2_b#7yo=O3!F=PM;v(44g28K$d`P*7p3E5MqqO9VMx@3?OJ5?wE8zQATf!b!Oq zFshWgjK8y^>!CZREZZK~QQ_RM9nR4xIzpE}m(uI)z;p z0?y7xODysZGfgkPk6Q|H(_G@nR|(UQ31@hVEOS*}bA@H5;`ySu{?$=^c?@LioCMK? 
z=SUIGI1=7HmyB&46@*Vghd0txT;G>bqfbrDw56gb-Edygm~WS;t-G5pu$5>Pc~3dCzU?$W(X9_ca6_sX>sJ2TPun$ z!ZKJQ4_R7{CF?W47PSQX2-!c~GUD~=rSj=Qkxt2CQMNVw5L97E_nhFRTye?=r~sRp zFtz?*>QXWG^0DX@QKj9DewK~03_6s}b&GCKooM1*VoySs3GFeui)nqI7tDIm-mNSk4>O&@YasXeP^?(GqwIycJ?L%w zf;3K3h|L@J0&TK686Y}_$GbZ3)i4#~0>PB*0~82P^f@G;}pxvZQeFLdb$CAq8`!QWWAO(gX=bu|o5LDJKPUuu|sh>(WG$ zwys18!J%sua$=M0cVP@+=|Qu0@;H-ac*;!hlXEBTz%ie5TLBZfhk8$DLQ7jpv2bnS zQMV}OQ+4HvJ=|WyhF_$0%KS0!^e{gU%>}Bt?NC|L1*(|J12ljg96ko^E1P}0tDL%DS$e}D@>#;tH zVhk?}STix&Pt$32gh*F7k?HQk3bWIResV5>8loXy1gMrDttCgQ<4f z74t;!itP94vu8&F11PRlIth47rUQVsSNB1$rUd@Cg$X`hT?N8x@9dSmTNJuj-excNMU&d z5<5_8z60K)J3=}I@1hYLtcBp#1vn6h+Q`Vkj~K#j`!Rg#t$&8uiJu1H9!8x0#4ZFa z*9gOrRxnhZ7epN?ff#&`?n1X$q%2aN5+kv2t#t_V?aRBs2nNwED#uibUWlm;;`s7m z9FEvTF>yMx?w)XfTc(Fk#o0rlf zV1o{#J=EkaY?{kRb^?VhzP71UK^n)LK);xc8}W-ctgD%S)3vAPoS)8zPI%KBWAt2< zVLe)$Ic0(?;=B{*4YgteJ}3Pg(C2E1yIQ>uzlb&xxDUjrOifmqQmuSRqK#>X$E|dA z17%Q)lsobz@Z7*?!IHPc1urychpB9Qxa?+-z#7e|K4&uhR$RtiUg za4;--FaSDVp5ktyCdauGsDt-Y^=m4E=9tDrmK9awwjh-k-dh=$Z(M2G5hM!Y7Chup zkw@}TYpLXfDO;KblM`q0xI9PpE6PoomZJD{R?@&spFN0FN7^lea~U>Lb*gH&vL5`l z!<_Z!aq-0I0L-HKUuK1m@FCMLrpi8g6k@mEXv`Uxxn9!QST+EbUzdSbbU zB-h!U2QE4?C+=1}Mx@=tKBpk*lRdc#W>{qOq+zLPmyhfg+U(JfD>$NLbY4c_(!t_B z#}un*vEqq@bgl+Ww<`7IXz*R5-w3>TBKb2H6sRM2WV;8xb%VS~Q|G)HBl==lJ}@l| zV%KKj_sP6byIdQx%iPFZA-iRNBhK{sTzz?tB=YS`qz@wg1(VtCx*?Qrn7cdE$d;+P z?iuBdQI$WV!Jgjmm<9GL5J2nS-;A8d08QC(#L+ZJV9 zxN&rex2kG;#@&@MtHtZ>$BDi`n;m-j2DHoO>>#zzG_dI&QO1)d;Jlh1RrUXXc=t*d zO+qi^tRO6nwy5}-hSm2)f^G2|sCL~YOe;yeFE#&ukIO{vk4_@RX=J4YYu}`={st0; zt#~;HSG#761jcLN&WodO|(9;rpoBNEaWBCR9|#z zbAs9xM%~3-*o`+WTnR@(Z3U~Om?>-vDXT$fe}{KFltBW>fvVOCsSbWrX<1*$4= zSH^>%FeR2-_+V7sF$|#y!d+CXj;ZId1ID;3HB^NtKTUMaQc6Ew^M)4hKYzc;XSkww}myem#;>;EtcEwV)VA92u*en|01Y#AhVg_9^U}IAafM zr#k%ArQ0Pl_b9KOPiZ|%RGUYP6&VciQZ&u(7GL##T2<3gc*S?F_sqD3P_o7&Ui+p$ z{Z#mhYkR6VdzN?TS2e6%;F4mQKT2EYQkFHgwNzYQxwu=p8h~EPOQ_9H^p^Va`DiVIG@E zI1f3JH-pTMK^eXLmnZ$~LBXJdm>LW}SOWsqF#Z;0ug}Z9dM5}yHJUI3J`6zO3`kLV 
zpQO7(n7U(Bd&%1gfgZkNM7j21coFn(88X1__pwAqT>e7ntB6S*1Pc|ns)4A}H-N&X zDyj=Jz{;R3s!uZj%OH|i(2^e>?z3uA`=vk9tEECTtd`XhZH;ujP-(!kOo&#qx&?L@ zsy28R26Y!%Vl>ca;G#fnOc#N@c$^pBL6ls3-;2J8-z$AWRI8{SSxf6zE z++LG7WRS$D2uK1os8pbsqZkp&s#B;?RS64qh*-#3pH4W$YZ?zl=^C}0jB zl}K19YbUUgmx)+TOzAxx*4%>IwYD$5vb0aR`ms=yiF{99xurwf>&>O11!DFOZ8CZ%?zA$gAxE%UA7W^SL+w#lAL%tk37I!7b8P z6K6|$i{2Iu_C-2w=O@DY#!qC~E8g(phi|=z>WvGhsf$q46!6{CVAF5fQ|b(@Gtp=_ z1&=_aMU;}z!BYpEG1(oScf&5t@+oCS?vjn=y;U|9&-QG?9PtY0R<0%y)c(8PngtlU zmE`tS3(CXG;XY%<`WPO(p0@td>Vt4!VRsvR<#u{>)b}^?zOe7ThqhAhEfsSCj|pOC zQjo7&oEFs62krUXY>&$(r%U~;>z{1@wx`YPi<|xaS979?;_$%%;nSxlf2`N1|w*m?>1cv zJt^(WC)cBQ3|p#J15o?;ulr-(+26n0>e5wzlBq!si5$fcSO_P;=8q=ingVt0hnZ~J zXy0|=2wbwmp*?Ckb6+tmVUcz%NvkrH0pNQQk@k1=pZ70yA`cWI%^&&xa9lmsJ;;}F zzi-wG;%|DSah3zj9wQGEbrr)9cD2Erq_Zp03}DRu;V*JHN5A-%i0Nqt1(<=j z2OlwdVGKvW1#~@}%Rl#UY7<}!FDaxosX!7#z4w}6eA^s`QLdO86+;q3tcp`9sG!#` zi{)kiH8idllz9bm$Pg+8UQ5X5Au9F6r|<*-IM+!m@B!$hH6Sp{;z;l?i4kAHes@32 zYgy6_KH5!U9}?7mwwwNwviiI8VN~?3oQ?l;KFqSr$9x#=;*a?-!DxsmWf0^Rnu!kb zLK-%#EJ#p14dwy{21PxPD8-75S%eqzBJ^5F@}YF;5BP6E3};L*G-%lx-Upf9cN~_d zjdV<}m(SCXehj~cAP6Y&1brSZ#JUYJM=_G=zf7;$M?+akb+^D^_843Xab-)S#$HHu z69>}C(?iX{^cb;MCWSLp8ksejJM?&1E#dyB9BlI|zs z2;MU?PhtX@1`VWJ?&D&sx6mPr;Z;;)QW{|$$}W%Kjnj@ab-ctRi{`Z~(_Eo;OyF0i zhp{1bZq(W{4n}vbg;#;VePe@-Z!wt0K1#-#RyL%$vA_Z;g7RHe3_2RU*fTWHa;; z*H~HJ>Ij||y+Jo?{HR)j;Vuv4=|KZ-m3L_mICT}Bv0x*Ln^=BO9Q3UM0OrAu!Qet& zFk4V244o6Y(k9;%Ya5+iA!tF)2K)jv4~$c7N*G}3VduW(9uF@uHO#wY@FNj*47y@) z(vAZODyp?_Y&(-Xl$I74m6mIk<=RJ1h;n{{4=^jX?TGQ2(Oc3bJBF*o+l+$4^*@ah zt}bwl_&f~m6nEMM3qHMVaHiAiR)(SOmc;Fr6hliZL^gwq&K5)36@GTWll~+37Gso3 z^dcLtrx}Smpy5~#MA6SiU@@cshoA%}UIM~uqVSZ6Va7d$b$b|-U@1B2#??s#eiI!` zIF>@-*gO$8$GBv8o!{45F53JTFHwOP|2K!NUbD~7L9s%Z*`TeW{$N!89^xGn=OjMH z%|%MOVL&PneuWj{jqhO8_bW;^7l(7dlho zZ#U+!R(m@})I5eSA2Kp;C%80>koe6g_AG`P_WF#<>2@Qq#L`X8@V-sTyN+b@8 z&WbQa@|id+x#Q}8WtuLpI8`sLO;v*YJS3gN@w9VGC1d>RshD2RS-xAJKeR?I$ZkmE z7|hMoI<((@OJYN3CJ9zK^Xr(ZXwQT7(9rl%mPSj8)+oW8SBAx3Tbd0wxc}f+x{J0X 
zM*8>omWGCYN1M8Ps6xYQ=VQ*G@(kC;6_%?_#W=g*jn)zC(MGG_*U2@YHAiP62HXlV z%q0n%0xu{S)TJxNzR>tXa3aSQdJK)cHa^S|`p{^bw71;oDbG?jLYhsJpf;gIeLz6U zG%y$i6HCVIBP^Kl67%;g2IhfHrDC$uh~oDurKVnu*31cQ{*^4&q%juqZ(O$SF43|_ z2CV6lE!YTVpok~RvHj|hhEV;Jz4SV8%@rnYULM8!&b|xGh)`pM`5RoM#8Q+kex#tI zsRjp++L7vr&qg@?*0W%q&9nQI^+R|i2+0h24X3r6BNucYu3c?`RFjPh3TDg z@ArDsV$~5tU)A?kaOr?WvL2jcUK7ndR9Q@;<$6geX@wSUqJni@?e9|=g~I0#z%F5B znQ-vG^$~O+C{H|x>ck^)Gs{5aW;eLR6UHmoKAb=ysk%Lr@BOf~;Hc%1*Va!gPU=u2Y zA5oBzkARo~JOv_r!@F`G%1!qYE!I(9&K2~0)_GoAdtSR*dHz}Bv%gn$nI|KYe)ko; z&-#AVYin7#PKWBid54EGvM$eCoA;-QZ>zs9ZP?r{@h)3bxz3Pdo?$(>2=L&;MH8^A z`a;-w{ug8C5S|GWW#OoUj`PJfI<{@wwr$($*tTukPRF)wPX5Jg=b1$pFoi3H>c}?aqGTglsIgnx37}e_Ok^>3hA;Q(1i4%ye*Qq$ zZI*|W7Nl)Gh1#~;c5yc-x3W>WvJnrT>|Yd&uF=9xf0iKI%4Xe>&X~I%+K0DjxR3q2 z^Q&*qPtQ}1O9%N;7s;JAa)fg4wj6?U*An+4rp8kbxgAVPz`|FoPODK5=4@)>0@5>~POF#AV-6gEj>ci5?o30;eu+s34kj{Gp$ z)?Oi!3$1qKv*Xb*)fTL(JSdevi9_LwDL4DXK_!~0I^lNneP1Qr}@B zRb@5YQgqp$iW>MOqE^!+WoGd6OWP~)J~{sBdc>Dhs38|Zh(1lVO*#(}Zj;5?zy>Wa zUwP@r{yEAaN0DeZhb1fmcPr`rkMf<9B$h0BL&fPg{FB``@3OAZ^iKq@D$55^u}lVE z``S#6<}$@ssi49oJS!GZF~22UfeUa#xBQg@d0!z_JRya~Y8Lc9HcGKBl_fdFkyQi* zLnmW?cR^!mX46;5^iP>jWhAXuCUSK6B8DO3ruy@;b7E?%T6Q@N7Z0|}ydSek6o#|$ zM1b=nzsE7-SwP=h(&9AT9ywBi<(~k^2>eXgl6I0N)vl-kcCr=kfq;VBFYRon&?S+YT<`M(y z?uXH$(&&vjZgFMzSqwe;4&rb3i)XO79Xs*k6VwO>eTN_H_m8Gh>AbbZf+&QEJ2kkB zm)P<(<`U@>2BCAb<=6`B?$j5h0ly_kC$Y5I3W6e-6X(PTV7N<4J59`U?$XchhTAzX z!`+>u?^U0ms?AOsTDz-tb#!WHQj=`~<7a4d=u?;*+D@d==(IKFl{N^Q*`K_tVXF#q z@`#my$1_URUn5LPajB9AAOh(BXxRHFVp}mulAtRlFfq@?YM@HSY*wPg1x+V0IRaKJ zCJq8@Yr^zf(bsh9`l$#9=L6v+2%>DzmclJGhsR!Hof}dW?v0bHfJx+PD=GBKHXv`ODZ>r3@))(C%I%s6Q*Kl zQ#^9;hUhr15d;~bX);?_XUkP>S{1pp1vRfC*&F^jg{4r(uGADuxCv0q2=0K?`n?Py^`rDfrqXC6c za6A96ssmm6#di-YKxUmqucdOcTn{5Ufkg@inZQ8f+<&UL?RXNa zyyU0!Qj$YTFU$)#TD{`;0kP9oIi3iID9dmeiN1SaIQDCm!G z50?30J-M)bvKi0f#K%dG`^ifzm5G-A9|%Rp`hOo;Q$N|YahoUr+jevIlG3Ug#MV$gT3Er2KNNfA8PkePVqu@hqU-rBvt)0l|i z^E9Xi)e+>CMeCZCX69;aFXe;^XOMa_ZuiUDDJ6rcBB`j!Gmj{<(xB`%jHg@tnJ=rd 
z4T!@jE&a_==a-P!#C#KOCuihlBgOen77z~Ek;#+tm&FGIHX{C3b=T(nx+Rb!ZsYj- z_r~`z+j(SZHjeZxmW2f7+2|wzv9X9jNtUEyd72!#8^57CN_45IJ5G~9jbhOB^j=k^ zeezY}FnGUEIR=&HpHH0UR+Pg&4f=z}%;>zv72VKsR}1Z~T8?@aQ$uzvQ5vK)B~BZ` zcOj%YXFAbrG%y4Xk*Q{SW@Y6*2h;iXiSdcR$H&e0r%NoE2S9jJ+U4n`qQIQu##_kX zDea*8+jlk_ne~U%Tq{pyr$l>QrZfKvU;!@!WkS@l!kryKUz<<}E!*Bqpjj#a2_wBm zi=>(JvNj)a^n|9&ypj?=rUv=FIELP%vc)U`fr-kEfc*hF(3!MYuRPp*FD?p4suS4P zu%DjIhA{A1O6a9xY`CET0ldZaUB4cxa@Gj2N*u(v-WP;wp=O21NnQcqHtAQrX&Q^L ztB2a|^@J3ZHB2zvBcEiP3yv6z5VvJXlcrx)x=kG}&ZrEU$|bZa4hL_S?nI81ddM~J zN4qH+GpSo8)rn!0a$$jEQK!f-tan^#Bh8qYW?bcR=^h(j@d7|7U{z)+r&5igrds8h z-AQ5V5sGdK(Y{GFLjr~ZMGWK33=wVvbK-gn=Zzv)k=7J4Du0vx4o^nn8U*4=P;Mv^ zAbs1S*sxE)swgjB8nxDf7O__`tk)p;6{g?qH|K;N@Jjkb_Ro$jhRzVU8qau z6$~uTa*&HiO+!sZS+1p4UTrM7S$eWhYwj{Ao=9fKcDJ(6DB3CwJ?}8!P!9m0L)3SB zHwkiFAzi7WTa}p0s4yelk;2rM8i4_H=)(g%^^?Pi%AR*LU#5fbc1v6=kX&5MDt8h3 zOBcAy`OsIVX<&0WP#ZOq77plX-mDr0EdksbLKbFjE5TOG{JJIs`QvXeBab65i&J3R5h%t#upBApsKS*BO2*XY%vX#(k>_+ zYevhpmeMXb9a~1MwdaDP#F`OigfxUL)zu<2b=wG9N{P&g#!Qu!uqKNsDayZ!V2m-E zm?r|AF_@tlFq4*@-5T| z!_A?h=jRxW{Fd+;E6?l*Ax(<3qD3zLriBaz>ornp;SFm)dYdfdHMUR8Oc}#egKZD< z66>(}N^S5z8U-%V%9he=#ygreSB;MSA=PjA2Q4KYr4AiyaJmE*s*g9`c`j4_rO}>X z{WEGxstp_93*Q~i=QK0l;7yESVSf=qzG@eGrzv2>#oTn+NczFmt*`nJ83^Sy3-|va60f8D-!5ZTX8$a4L*Vn;E6p zh9hgg_qBxd{B*N)R&a#CKw+@`1EIpyUH$b}bkd$iih`;l5zOxCQ%dL{WbC}HfrXET zbmmjo+n)gCz7RdN(Eu0W07ByWSo?|q(|!GljJ!O-OtKG37FXb1gN6Joj7Pj zCmbfm#>h8IVMK$gWe2|&$f@ZZ$*Zb>56QcDF)fJ=Y|^9BtkhRP|2aRy$~O?2o+EhN zi8^H*LChUyQ%QWb^2tKaK3V9yG$aizs5DSlZ(@Vk1EDc`He-V9JNs=f!+kWUSJRL{ zjN|Y69DTNj$;nJogy$_YFu!ljE{5WX@(3_{b`I?xfb`7 zxKCO`$D*pqcUPz1$}VZ6XA^1GLDRw~T=zDzw}(+8`jtF3`#97%$wC>Xc+pzl-d>+n z0EH=l5>H4ewi*(_XxE{q?<{O59KI6XtU*7w_R9cD=2iUPUHx9-FJY$CM0PSYRYiF1 zUg&2(=&RuO9iJ0OiW`;>6ofBuAegPZhwa_2`W{xGyyCZ~QaQ-(HY- zO@+$}Z~I}_IdX5?RWII{aWVxDrt+7rK{G`20P@eAutxZ7lsZ|D9{EB2a30$HTA}4| zJM^S?o#c1_dES0m>eFw9H&eF7cpj;5Tz_4Sc~S9cL%Yl^MIj+!ImFCFWSns$PE&qS z#F}41w~i57v!yJ2dRS)aWmwI?x^1uz^;vpc&{xzC!14tpBCx=eB&68RB^Gdpe}oe9 
zD~=AXz-A1qET)Un!P~Ray~$Ozlw_@x9m!HPHUG2+YgNJ^t$3AR@Gq2Yi&~)?I}!zM~=z!>?2H`74Oga&q(bkVr^H?v0L447>|fy$ZFDc1$;k{owQMl6k5N8$co z@zHbr!c#y86Ty@Mf-C=<%?UA+fkJ-uA_F-sf507slY)+a_IQO|@g1fVA%~(n4(cCU zJ?tb&prQr(u>F%`bYCzJY=5_Td|-eztR0PWHveJu*cMcltKhI5Qv1Zg252n@t80!z zT(bydrP?^KngIo|jd^zVUl&%R2%(XAuS<5ZHAo&wPt^GNFyUqDy(X+rUkAne1>mg4 zf_ZKs^9rUMJikS`p$mxK$>zqaS#KgU)NR5$#rveC(>a9I}00 z%1*A`Cgu<;2$7J^?tJQkSyG!apUw!4q{;gBtG6L1FIj~ApU25KJ*y6iBgdXv?!}Zd zmhM`d<~ruU^6Pu>0&(%C8rgpF*6z)Mv01Z>+orE@y?hfbI1KmuW{CiYDmF|=k8yvo zrad#4H;1DOzsbNIWiZ)^7J!HNl6pf1KF#)Zr{>ouupSAh9Ca|e(r|weNd1rio`DQ_(RRAn58k=Lvt&F2ib#pe>W;T`ri5wXWq5Q z#ojnzT=>MPApW?{ts6}ot)|NdWPDSO0*Ptp17e5kRze%uRF+<|K|oWzM`?++fmF>$ zVTm%a2~MEd|JLyX&X@R&2sgQrw>xBt>3Mz?gDSy{ zbOHrA10%xAvrQi)s>uBf2Yvh*%$XOV^#g1(>06-sj`2YmZmynF3dj+xn6d|96g#*f z$XVUfV9Xyw^lIxI+mRSB0Ic`0OU}EO5Kb{sv|aR7h9h^QIjW z-nIxsI`p$=B#QtLSR)#>ch{#R4P$_llKAtb;wMvSNoW~L@!*h zDACb`@$t;#%+>Qn-9Dgv`*lf97eZwfDiD)=j6mE#q(|zEpohGyVvT^ZPf~~-i2d%_ zFMmRSs6}S!jSe~G;dfmK1G!c@M${_`K$Jse6Sx2SE!P+30kYRgR<$7IHf)#(CNWMv4q_Q2-&;qzo7~m_ll1 zx(;?X4a;5^aOLPHuTj_pM~8`oPDv#QRDgBQovfKpcx#7HSdm_$X0s=}#IoHu3A)oF z1ssMuF-fqKfMQj7&vBjX>dUtZi96v4M5dNaEzPFDgpU|AcqJP`gEU!64P`>nSnqs0 z1_T^hjNh0E5mqe@1`KviG|lM8qO1;N&HJImNWbdV|&0C>66Td>QrWf`VI%%aBq3y z%wYB($&P#lzD`w$Q4=^vMH3xg7Z7Q=vs@)hFLo0mYS~1th&G(_*>APH>VU_yV7s<< zOrHWBu$FSm9XMy|jS`*Tv8Cs`_W7LQSMsdAJdX@7T^{WEtI9#f%f0Jo&Ntm2++L+y zK)MyI{q&EdYctyc#U7NyWj#Ye&O~tXtove*F4zTI{=sKiYK2?TivK)supf1_^0z{r z&pum2#;aBF`Z5+p?GxV6uL_?7AHF!ce9L+KzvXiVZ5OFu3ffV8Ray{3xQCRS(H8z~ z1{r&*)xU8j=U*k!%r5KUKmd!*PIxz36&4%{VqJS!){-f zk^P__ZKj+n@e$=ifkci>*e7Zd$q8bKXa?rWW{J$0IzYCV%OTl;9ATjjVmn(6VpB@r zbXFMDS1yTStIQ2Y5h9!(Cm&>=5y95jFWwIgR`p2d!;TGP(}`4R(PTqRlaa@=^djgP zKh}$O2ik6w>Q6@_sxzr^DqjdzoFY+a4OYAqGcXL^W&7875!6G1a0KMei3{vPEw>=p zspy4zJvO2L4$cfKt8}Bopg!iJ~Q}lQG$8Y z{7gi0pE>-uu)sWQkxo9t`L3yd1b={&Pn0XwZ*Xa$76fkzZwQx)>-hU8S2q>!9KUPb zn~*nxk6a(H(0{SJrEj)QWjXoPJ|ce4cn*u*y?tjXY)YTMM=Espg&wh}RDAJs-w}_? 
zcoA1Vqf*L3ZzLWexAMONTFOLkT^`wZWH~?3m_Vmx;7{E96fkl($cSg%itf0*R!~I5<>E*-L#e(F8=U93#sM?k&rw zLLF4u-K;(&uw0mc{GC&+UHOMhc-3h#S6q~QA_X`U4ygJcLE@0tP||XJ1uSsY(gEh7 zmF)tNYm4XMUJ;2%$GR29!Dz}lb)yt;n_Xah$&t~uzdww`JqmC}anavtmVwAMGSsG6)JHJ~3xnlL%<6O3vo{>R8)!a!QSw|Hveq!g8e*>;Xn;|4EUs|kwa_SJMS2Ro3XZdu|${iemCN# zt}%EvXOMas26;+%-c@+|L{wOk$CCPB)JQ`vC4H`#D+KyhzQ|wlf~Y|e0q{0K(ndml zUtOy6>~gU1h~79`A+BNFpx2Ub2wa4B=B6q!lHoK%nEb1VXpVCpE@d<=xmLeDw+~C4 zY*n)(rJ^juEQ1PtR0Bp`rKX+b;Bp^qITI-FY}sIr&306ig}S+kqg{b`P2~intQ8mhA3$%jPCr={<#MGmpJqu zfi*DhGHQH(WNMi*Lr-%miN(LlNR@%F+)G|bxQrw5-QBkxVcTB3dDNkxG4}ZCrGPJm< zB|`hBY|Q^g2i{1e)7jJ101f*Q!ZwW1PwGg*x6a*340vV4k2p5i!ymLk$|~y*b|y+owkG< zRnPoWSa(ST)Xb+JNv$B=2(8Kpgi@Lxv?g+G?t(5%?So`!e`iGj@(=F^=I&PO@(&Bd z>3ftWc`AQL+W@Fi>PKJ_6f0y>y*!jsu|rC%1LEWxigIu&;&Jv@bm;9{4Sm8DM?V}G zdH_K_{fQpf_mblD0GQJw41HEw@m6>7`48~?bEnACgo{P-WM>Y9CXS01kox6)b7?k{ z6v>8n559FsoS;=$GpwN9L+TLKO@ZEfATGGQP7COZzl|VE{W|%0eQ0+=PnIwI^IbPl zqa)Zm?uy{#M5LZZv-a|fwLc~f&BgDU+iYJ64+afDcC_~L+Dsl>G80M-`LA@x! z#(;PsLqtGxxxwt$2L9%9gWxoc;A?wgdzi`g(p`$97g_t$uc$9U%InZf8xr&gZ^2A zHVT8+Grh#e*hQlk^~8X%tl5#fRL(5zicxIjjVJhmJhrfnNA!t|y2%-P`3jD@UUv(A zV(rxMgs55A8d`7i-Qiw)ck6kQ^-3JKUEmqU5qd`SmU#z*H~S_H{Ghmm;vI2qq`d*X zl>88S|J}3Dt)J^HD)Q=yTJp)~*`KpwcWKHZ!Z}E48LA%c5;XNMrvARk&??U*Q`vmV z3PZFxxnqT+x_+W{*k^#fBlg@1f_25_%H+K*v`e6K>j3IPmn+VDJVA>ldq-j~yjsZv zuQx$E&<$dlZflM?S+hEvYg2iWv#t1ky4uk_#FFQFPouX(T+oZ!h!ud_f{kP#3u9Yo zHj+ka_r(^LA%ZSzwoPs1k{_|gvTGF5qDkI}+Mxq8;ki=RY1*=BK#nc;B*gva$>0BB za9}5aG&NNy2z zVtuYIHO+|rr-5uu|F5DKp<_82eOH{CGy}_=9@d7Ei|<9dYH-4~yzG!^nURGet%hdq5+H9fLVb{EQnKP8nx2J-`1=C! 
zck8`3xs+$&YTdK-ZU4o}J4=tX%SX&=-JWCqWXudbXJp|;{{?YSqqG@4XGjwM)HM3q z^-%D$XII&GRcpdJ2GRWVB(X4ZLCBFTg(y2Psl3C5NIr_lB`KDwZ)|~BEsaKR{H*6-+)liokdeHg z9H@pDAo4@dT`eB;Kd%UC)A#zYc)!nI?OOd~{W4y}T2o>JxI2NKZs%@1dBAn{_jh1< zaJTkM`yjjsKQd$kX510ICQABeyfCqM73YRZ@`g(aLQ`^MXYhg2g0o&$R{~QEP;&y^ zKeVt%W(Lf?aHn^U_u0L8s`pXce!=fadjW3^qwRHd{#fB7Z|S?iTJHXAOjO*$ccQ}I zad-tU@2__PZ|_FG{L&54y+3>b==S~1nPJl%h=0{co~AVsJn<&%k}Y^1^1{&^r!{u= z;?^8ut&Mz5x=`>z=?KuBvN>3;U4Lo1aPjFHd`z2V2U$M+)+Bi6<4k5_Kl5VKBIPI) zgX9cH@*gCE>%?{0X-eC9B!(euVX2#S6=N(#JZ7Xn6TNgYVATt}$V2DMQak*H;25b! zj-FW!nqf~p-?`_7%%L9l;h^+{hY?u$6&MYonk`U90O^U3MU z7>y~`(#TB*!mq=17da>vhcCsN$W;`67)}>8=q=daweAqQv$Z*h1bMaras?aU85`tz zpWD@I-O5U3b4qX@+v=OXFB?h-|le`VOi!*75MQiY@$QPy(p< z6(2@PsMrYOn9rvZh1M_TuRy;?{=GU{7|mKdm%wgsYqDLguEEQyG@CXP8U4m)&_0Mx z`0go)J_Dp~6i7IA@n8OYch(6QH(?5_sRS)|d&E2Ka=Ce8!(G?ejaIC;Ft9xLe&7=t z9q~E0NM1e$M@pT&w&gqMQ)=KT9|7r}AQN8x1|@v@b}V5a8X0^?hh($WJvgQjL?&I5 zLIB;lb8cEZeL<5rp$66(h|s_P+BvIbj{T_kHbjMBdypQSLavE}C$Z@ zsvD?naNgSz$i|j}epe*5%Y5ffG{x5iuocfbvN@_Rn5W(MTHftg&{s#c^lx0UJKi0q zFA)D{V6F_j!QWp&;|szs zyyk4*0h;6O`iht53-B+{b*TrQ{Ma8gIN*cq3Hh6LeZn_c&xgPWJCrH7w)9yZy|-?L zUe3Cd1KfUk?l0)EDJO7scRkb37A%r4J1aO>-YQ0QO{itexv|KJY{L-3)<^m+-X^eq zTijgH=s#fp-JrqvV~MmVNc29HkA9&Flvga0v;Se9;Oz|hmWF9-LZLC>j9x=f?AhY5 zv0AA5OKuV1rspfcNJ=ZK=t{{d^R#)rIcL25YQt%r2fHo%L63R_P@76;gR)+{tbLZAUIDsi>d~ zF)h0E8zj9sqP&|@fGpNp)fC&#rh}3wZ9+<=D5Li2UfDn8p?hExoBf=Xj}6WCAbj|j zpjV3PXYauvHBX&dUcv`8$1ZeyR%X||C0?E^)s9#2i41FZ4Dyg8T%fmt)&OphU2uFJ zlg+Q`4{E3L)IGnuhk7qnb`v%wcc5h|vu$^8f_{3EMe`W)W9{AwY-AnXg54MU$7C#= zUf&+zWIM;aIoI@BUA6dvFHp6*xMM}0w5Za=>L&J2XI6bC#K!{;X5Mo^tP4J~=sB3; zEsahP52ztLTeMMNCgF@EerH6#UJky)4e5*-@7(82W}!51WvlsDDmC0lA`j<7d}f3YM1 z)qJF0nGQ^oPzs<+Ea$iC*}@K{H;2`dY47Kol$%!0uZ{-=H;o+7_i$YNw|F0#&hnY0 z{W-uMSFl?m#}|4bRPU^){GSP2tfL7I$*X%**(spy*>+X zP&_@6b|IXs=E}h_YCde@f*0T$yO$pzjL<>1V8sTBfe!ZDe}eE*Sc#ElBi|uiwgzmN zx`Wu<(%>+)t|pm^+%o^ty;~*?dGRpPM!wK@_y^3K>*1 zs}2cK>i+e>+1H_Ci`q~G)gIB5g*kf_WRzTzAg1BSM>$1Qc2bbuP;|P2dNeK)V*I69a7f# 
z8_iErSB%9B(P@^FKv-9b#SF^mM7dESS)f=4l>O}&omx%ct_L^r8LV6(Eg7FKF!Wu)GG**+*< z<+`xnb2a|gkCwZ&KA-_#SWbw{;lrn_2Jt=sms#6BI+V!n=vbcJ+2>$HV)r`6uv34R6@YIs7jFr@oi8FW7fEZv@^+ z?5^qC{!7spm`{OipYB7|WRb05dUuxl15OkRePwKS*Xi^?}nMkVin#Dlk0 zb$1T+Vvcd-WdjY)Zo~LW$StPN?s@1$QVYx(>Ay5ZsL*$Wp9V|XpZ4U#h@}q*B`KuyHYF8Qt8NuuAX$vktmpfB(lvTi`I4)%gb7Vp^A$6kV~o*zQ2pX^gIU|n05Q55#L(P_&i zK=qR+J{p}epht6K_=>_qbGXo$N`s3^5^b|O@_EtARq?)Kkol~{^pyiPh{i67lM42h zu_>XBNr;&URq~&S`6xegK6q1%2lfN4nOqN8&KHXR53DV58-dRU`t$KxFY$}JEr_>Q z=u?#ZpKplcqu@ci7pU`_yjmGv0Wde5>dBNH?Ht+I!is6@&M!fKRalfSGqW34%;_8$ z*tFeYw}C*45DExSmQhf2)7l}@>FW=lz>v4r4dAtb^MYM&nr5rL6203{f`pILf;}%H z9kw+G8|?5=Vpk`l(SI(~q%+d^tG3bd)rizQWYsccCHFd3jfqaTnw&QF3XBTiWNbF^= zB8vB+$ujN<2MrL+q^~P7Ve9TK8wY?+v^VygR@q0<-J$ALNSvXN+=j5|sCu?0YAIH$ zhE}^k!K*QE{j@jDjqJ6oLn%~{Q)-Z4((_Oy9X3M&RGL+=#pDTGp3{)p3g?zL`D0fjz z=$3^8#03B;Mur`&0%$6Rc7dun*kp`NeUzeQEbiSuL;xE)k3MM;I4cH+UATOBB=40I z*jNm2{n$CYY{u!Jlqlev2zrDq7Vti@VT6~Z(a?cmIUz%I2SYZGD6hLj5&d*|zaOm4 z$~t-|X!I>x^w#l$CrMWgBk2yfA)$`GRR4JtVdp>c%MKs6CXsH8mk zoB0T=fAvF`&@>oGx2Bi*T^Bm!q&r!ipnC(n@K52o2w1)aeI5{dQLe_;rU0mvSyuZU z9ljV#!}+qFK%SLYhxdIPu zq9AQ-;ty@&aCFT0(OlvF-M@kw*i55a+k}qTRF~VEPS#%G7g|C(NaQ1hV`waxl}Gel zLL1p*DOsY;5Ci`9^s2`pzGu|Sxg9aXZMagd zfd?YzEXX985-=tD#~P!JJR*$N;Ji0EW%)^mF#Hd`2toyK}e;Fn6`uAQ({7ZGjSPP+(Yp6=`9$zo;%>lyo#!2<) z=C8h`PFv802E=%-41vxmglqg;o!NT)kVspQ_k7i^l@{_UcO-B(T*7Yn5rN8It+Qz( z4T54gWAW{R&fFqVWkiVpVBsGn1&hTFg@qoc^EjN?&5C9Cf`&pDk~*q>5z#YKiER7G zRl>%~)QZPN38|Unu6uPbH`;}@HtK|l!uRvvqrtO$RdX?2qEp@2Hp1k_pM;HGo(=^l zSJXxggzO$U)=uf19&7p?K(H_}qaW2`KOupn3*Z&LAKNqz-E?LZ7-t@6VVcn1s z#7Gd~4O(Yww?bRMuNFgz{iKbpy0!xSFF;?KQg%ejvAn8qL%FW}UxWo(WA)NNnG3;T z^>Cz2QxlI&6LVq*i%Qc<6|76Swp3fnV7Ure`M;KGl^rEb(YBQ4Rz3a^Hwji-l(G^ox3PI%c`6t7@71J$7VN8CjQ&x|z~b$CT{J@sIfK0TW{Y zWL^?Ydd?AEt;C5j*h9)T!u4vhFCOSNJfC@kG+x0(ON*_9{hqKbm;P}T-^>vz(1A?& zpL{(hroh=DElBPuVY&!j`F+&fsXW)R^alQN4p0COxVq)3euYbMVNPD$JT1c z71#oKuU9aS?WpRZB4B}3QcIXqaAPn@b*eSW#W7tdy(o;Wl#n)txrflezd_sGLIg=m 
z1k1=ZfH!tYAAdiB3pe*Wm=LB>eG4w!eIuy^q7kSEua8|G&hB08RTEKz3z9O4i#eAY z^va7UmjcDz#vWXS>N`ZWw~KVf@Vkq8L1B=V$VHGLEo9NFMl3AwL=Oo^?^=<>W z#dtP|+$j#`CpS3avtZ%toE!b-N0Sp-uyQ#o|0UcsNF^^)d}vKq^N|4dYNB|Lx7=Y4 zehFEcF$nwyd6A6 znB~f#b&>Tu#l562Y3sChOcTV_(^MeA6_sl*3s$qsbb!W|g6lF16r%IE*K*5om-|}l zWzZ8B`*AwZrdv1 zc7&;Cg?(8Is;WC}ZzTB-=(c021KakYx_8?{buVyBxpk;xn1AENa5n8p>ozb(fC2Z{ zEm_oE<~q+Z#q?@iR9&Av+XVX+8`zq@Sa$E6175>SQ4D|n%hEX=dMLkgI>F}Zs5tK8 zkg9m#*o{Re?#3Rk@jm)^Q;g-jGpkme@w`rJ1nShkj@@+A-8E{X-fCLuR)5qfTbI7* zcVl*am71R{Nk5@h8io*5Q8z$|KW0fFhAAJ&++eGlScIhCVyn=x1tiwQ?XZbHP*}u> z^cRT3fQ?I#xUD_2Yrk|A40orfa1Tk!*|2v*j7Ru(lL{56{<+Sa0ozKekVti5C8#Amaj8-e%-vKiDCp+Le#kfuw2 z73Xe?Y@bJ3Cv_rr!faKl17sPijoWXhcjpB;jTEMUmXvs1uaU1rs61$qsP`2~HlxDRN#uieC!4=97L-61jEwz&z{nkT34aP?l> z1AN=?bQ{mQ=!fr!zrA?-gtlksHeDg7A8|r{bra>OP0!Fp>|cuZ<(i*3cr|g}$cSGD zK$Kx^92_Xi&q4GCNaRZknId^Z!nmcL5JkYnB6&+O?%3zcB6!E2pyq3a+Yr4W&s_?+ zB5aB~q0MayJtKVL%ykLnkiNmpc?&%QypvBHg>uN=JoA0=-#~t(i9gSaxD#O^-Tc!} zhB?(-qD4gSBs1!{R4K-Az3RP zrzN-xnFshGe5mqDDaJSae}3m@@C5g8E+8Ps(ElszgZBU7cm9vRz;9&qzp3U~%G-_r zW#q5s*SaYe&D_5N5(JHejeRVQ2EPwg>k86pDCj{q{&#o6Q%bH#d}Ku z=z+F-tHloVW6PU)3|^E57@j1}9S273^9a%B&UO+5(Ly`+H~jqu*ar5)P@oTo1tho$ zr^^2-vC{zKBr>TPcat7g$EcB1vpzLZkc>VlPLjeJ=n5qf^G5p9u@GWx8N!`k#L!M= z|M`42L-XgIBrzHTi7VRMTUw~CV*yILzx=B(Pn2bZeRHdB~Kz0PUoMGcrSRnk&&s|T?X z@jM1@%}D_r%JF1j75pISBil0!%h8i~@c{cW-*EQW%7*-M@$>KE@wj_?@8RO=JRt#A%|QW_8NG_%IP@rnE2Re5fIso6 z@o@_Du*GH6+1YR-3nE;G=}LE%Y2j1BkM}Na^9-6qh@0}JBC%Q;vml;gV}+-&BIj$zc0L-5ojMXK zeQBTDa1!C`7~@LzrBpdhAwF);J8(LL9E~sA1GSOuKnAP+Kb0Z~nkQu?C2%%wZ0$dy z1?m_iOtUU*|E$IfaCaka(cQii&2VY@Slop(bJ*A8`PP@N@ob0?obnSEz17W+6 z#dgT+ugoyZx+~-b_Q2e7#!PMOB{`CUMxxcwho0&;Ow_!j#{WjT6lj*oGe}$uO2-r0s16&szU{EhuU@@2 zKfeL*yf1|W?_3l)@4SfP(fuFV-Z4tBt;-V4ux;D6ZJQak%?#VNZDrWDGpr1o88$ld z-tMZd8n3dy8n@n`eg4n0_Fi*ou16xbid>P0Q7a@zClI}&v$H@L*=~&5!i@yGJnZ6Tri>#o z!y^}|86n*u?l?NUzxGwYP@WrK+|fRG#h|@-RSu%)yjA?X!EE*H0rvdNU4Y}jXGUbE3*m8Bfp~_qy1rbGc)z+tvi$P8>etx-xHhpD#;kHNlK(A4J$7)_0Jh>0 z<1rh2_850e(6dV4AY$7h^ZlAgXrezv 
z4Eh7fi;qjc6S_7hjKVN1yul?e$B;&7{7jebI_!Gp5qp;@4Cdog+X)Oj=g0O*!KXa= z@*g)}GyDthzmk)Kxt*~QD>E^}-#QmjirR8W3h+G5+RHB5g{Z*Ymq>Xtx0I(j7JX(7rM zp>9VPk;p|Ic+r8UDz20u$#Gugygwqp;uNJ@INiH*m!^YgsYxX4M0gsCMIhIH1p=&3 zc?IbRt?z$J=f9ivbn9a!@hQRK5jveQbkOn=5ZJSXY$g`T>+#(GImL*-pu^E^D$|Z2 zw|%$jPL%LL0TGEVxftLZ8OLL-ktCOPf9+Y)BW)~?`4NkRsxcyRxubFA?(H7;%F;K3 z?9uCOc0o0O2CEKl6;fa5Hk#}jqE&Op@q+yl=1`satIuM-k~Y{PKVlniBL+A*`=S^D;s*yh%P%^oU-!=a_Z z_l)B8FU>J$EmsJNPfeudKWdIK|F`T=$k@v2f19GRw$j|EZk)Ma14)pgGG#tEAghY_ zQ#qap2$YIsPzWH?3440+T6{dI^(^2!p{^%@sFRcE=9@yatI)ixn5pLxPdnS;Po4*p zxnF#|J|OfVp5R`o5Q2eyLh&Ih@I0w-`T!AxiU80-c5n~wj5G!k0|D^F+{jnrzBSp% z_8 zqh*cI_itTj$1chwtY8Q^V;_W$NV@r#ATA0B;qDDn1zV^lHWEcGWV9J8c3W$#HCbKQ z3i0pBeXCG6#)PTtHp~ai->a)$wHl;GfnE5v!AJ#YaOGLTn#Glq^}4A&1Fa%s7|j$X zJu8J5Iqu`0W4*W@J?b39qLyY7TcW6HDk{lsGvCs-djKiGuT8Yy9GV zNd+V9Lka(olXGLEzDmd&Y4(~n{+8!6Tz?;c*Fv{Ew17$Ux{#@z#b|^ zneF1?9N{nPf!j!rNMuKtI9x&xFR-Ingo6Ygh!L<`D`j#=96@}PHg?v`pK4PWf+a`a zLN15U#HU9EAJ=NfqZE`nFI!oL%P_x-H97+;6fs=PIVPZkHfj+?2LUq(Kk9_Hpb)QE zuRha9wRbR4lF$0g;0$>GwVX)F+9hQ4^Qtrf|B14*|F^D+xW1#A(7)}4j#7}3Ll%Jd z;(=c4XUC_-qpkK02ixlT^4X@h8#K@YA=rnBC|hXS6i`2PADfNQm*O+P*P{^?ueSrY zCFZv#uuMM_@V zBFYOPbp$aCThNJ7CkiOr6lc6&BRb`zFv*|?WKPyveu~P=wG)kkL+6=@q7SNq`L}!jmm@3v<3b0xXAi!OHxt6(7z zYcu{UtHJ->Un`qA80#DTM-#|W)Ry|aop5(4-P)qqSoenz+^W!5M5$PC0%5;}0K!}3 zUCyJiHmv$~h3Wf%6Oz1KydN>)7Z$qea0@*Xt&Z%DzcRUQUav2Ka|4rr4Mr$aM~)kT^1oXxzfHZHj-9zR~?R7YolqdCmaNsopPQ2GQUT! zbNW6^*J@(lUXa!Ft?x24yF3FY!=tNEaab2Psv5aN+Y#C(=|kzeU@MaIa7r*cJy6If z+>D95NRWSvzj5&FW_?AfXuSTz1KF?(Z+m(su>+r`DfxRFXw%WQA-HHN=?YL|O1_Gq zDEd0O|GENyRvrs0Ay`9v8rw0ZSFUdqAE6Vg@S}^MSHcFF^U2p84`I?Gpf-_o`algM zQT7mwK4=kwy<9@Mhe>^eC$--+Om1Ne+4A$Z{=V1$^>6)G!tj6lt$%yIEHy7Hr6rV) z&z{a{Z*H>MC?HwDCo@e%A$kbC9)LDxf>?M#{KBuk)gHCF{$++G)2zgmMcR2UW=mD4 zi;X{`7$`B#Pl&;Zg@uCNMT#V{nq2Ts8YIq(%~uz#vznXletK*? 
zJ!Uyhb1XewP2y<-$j8h9*A^hw@QEOb|OHu&V~IWMA0umqJS2C z9!Q&lh_h78-?IFG(@L)ZM<#0>*n&j8--=J7bj=kE6ogd2SWSu%*#c?J$stwm^y0|E zYDLhlk!z^4iW} z1t*LxYxwuhB+a)&T+9MU;k%5wNHI=Z8jL){_{4T8)BJuPp zX*)_uPDmoP{^--^NvqUsv_RQ>8?u|209}IdIgu=k`o4jPvVC~CRmEM3Gpp>R9~16z zMS%kI8T=!$mOe6eN`-iQo3Sc zu23g}u0iV{Wk!+&&gYkocmQj2YQmL)vBiKq4Jb47w>5eqbNx!TdrFx-$ky`ZfF$z! z#BKZpC%6;lj8JBpH_t3OYP5+vb#`w(7ml0=*sTWMcsk;-dLehVcv6J2Sfl!F=+swG z1Ub=u36$yW48nU`gvF0;uj1<#O`2VIl+7V1N}T}+!AUY!(%r)FluI0x%|Qyo!lWe0(cttxJRYIlo=bGvhu+J@v!Xr3Xqx z5t#=0mlGR(G9*Wf=#&^?Segm9N%y9#XJ-v&smrrz;>LlXo=m|AuF-zkwp5j-f>eJ! ziOb07+=JcV@UFAE-_h4$COvb)gg|iV6HlM3cAk36m0s`lB~ERy(@R(>|*G0qL4HD5OuqndzfdDs4IPdWO>n^=V`-& zXbj~=x1d`mvZkFEPPrMJjRG+-rogA%iW2O^172v3Fgv7^{P!AVg?gKK=ro2T< zoPam(O&_q24Xz(8dJ3P~1qh}RdQX)Fgx<#o;gW_C)y@bOS zik8I=jff!a6|;>B@f8K#vbrDR+_z;F@Iq|Du!k12bCyK9NDRTc5ETdw$=Z}mw5f=e z7>2_|uZ60A3Opo0R*k_ZGbWHKFhf+7_1$ug6}t(SIwI@epG9i8Z!C34P>=1tuLrHK zEb_rZjRRN;(>Onl4{cR<2A`}_35*&f=Q)9X7DMj{GNxUk#wUjepN6Zn7Jb-r+Rgdo zh<+fdn^PcD3h^fO%b<|&|5TgRRu@$%-M_Ku0*lM?RJZA)^3NApa@VBTIn+j-G31^Z zFsjJx>2qA8D=0lgM3vFII=#%Y?2e4bbv?eer)~`_Qj_6;8uwTU#=^zT#o$ijPU4q| zElp!+`k5vWVv8PydpY!3M8= zw#U>;@#Y0e*s?}j5Q&hY%RdNAIc&cK(xUzZNPSU$ZqU{C3wM}jSKS35-n}xL*I|i+ zL&pbh`e|Qnx}F8D8!1CR0j3<6vS!+@E_<)r1ikk)dB84arv z{8#f%jKWCncipPtUX%NDwk9~LNoJ|%$Sid0%X&EbX-=+$?=-&{rZ9pFw5 z{L7aJ*gtU3m7_?+y3v(c15z| zlAjx$d77KT!c^K3UcaE(@8(7jYC;moQSiPz42rE3%`t2pX*oF!dFU^Jf=6Ve-mDitQjU*_8s`KnNKiRJWhRWxNY3?+2)*OnoT^l7& z8su^F3XRdphsb>SOMz|hS`U2gHR0Y~2KgX>-#j(-ZXY==2KeR?D# zY}f5wo0hBCNC>vFQe0`m-K~e{_Obnk^;G{M_qm@Zky!O3Z44nH#(V5!bV9BLDCS-0 zv=cXP2wkbT$hi&~sC*hf#T9&sYNnvXVT>a^xebrf^35&U?qW4EwL&dpG_P9y-MUP^PE*`Z;4r6~o;X4|^yXDYHg)uxh0u zY~iIV$(3GZmQN21Q!`ae|6n0H_b)nGY)Pkz*QH2Xs8^Nb*ERAwc8Z3Ae~F@~TVYOI zXpA%PR%DtxF^XmnIAzk2is6bgVYW=&?Pi;8-GIbaF9|bli3mEQ7@J2p#x5i@f_k~R zS@f5|L)x0yV_s;W80$%DWL9c~Hn1xEaz=?#Wkt)YzMs&lw;o}X*zTavlAOK(1qc{t zEP|VrImf6<=SrTYxxp@QHIh7IntlR^cFx_Un#yS~p~wWM#OYFHmTr^@4<9y6mfPk;bh|c6RlxY<_%|#{_vYk~Mb!t+Q3u_H0LT&xGf# 
z2p?3_WeXJqZ^#D36+dr_DAN@&2GUF$Rec6C3%6K#GU%;%)Z_Hx&TV7HN6(GBBC8O# zJS$N^duif5Q71PRf`U`%mvi z`W}R`WY7fKHVBh-yGi!)u;cHVgGihy#P(&~s#a&QjHP5hx!0}Av1{C4;%c$jFJ$iv z(I`d4z_%?aHCe9|h{;P}7<%hFY4V$P)?!xjspZ>beB~f%{EEgC;6eeOhl0qO0GzH` zOHN;Huy+WPMVY0Ox}=J4W-*6d>~`0Bd9*qz@S+q)(3%y;f(v>YOoCvP&O2CA=X36) zb}HnseurC~G15GbWzQh3YeM&iY~~vS2l2+dY_5ctZ|e%2B`jvGDS&j&;55^kA-iT~ zo4bzF85(SMM#A1Dk?5=4AlVS$Rb@dTJ73jORgOj&ZnGWP1wI_4Lb+L5{xjNyw{l>Y za=Sb{qJmnGyIb;s1w?f?(K zsUP*BA+M8%$(Dhy?~Z1-Y$cMHfr2}`I|IU$#6^s1EIMr(E; zgNQ$0;|pn*{caLL$<@D0*)d#?=!jCeL)6;pb9ta}t)Mv7F2(BA5RG<4d7W{g8*C2h z-}C&O-sk~-;Wb%-vkUJGrffUZ-w9|Vg((V^8MJ^s<<21^oKhM0S96oOcvEq zxgZ;6EYNJdIB=oL7DB)e$CnHwMTiYwdOlZ<5}}aeY1Y;<~GAUY_?JNaLO{Cee@dy-FLoSk$W^PBcH~ZE)+8Et0SuOwj4=%d8|UB0s9|a3vqjoFs>>yH_gr~ zqjZGqanwnh)?B($6LKm*FIbq%n-sM^xdCr;u;K#!EF(Y95n1Dqo^h44I%H_Y!Nhb0 zPG=&_u)?3B>GI!3AY)vWv7-mt3Z{Ga;gaxO=nTf=#@;mKYhM|3(T<<`v|5$DsSZ41 zWh0JS{GMsQwfDS>lzw2zobj3I|5D)lK;v3Qbg6%Uc&b<)uU5eG@m2V?=e9+`UPC}^ z7%O-mmWVnx*#KoO z?t*PAOwr-%bl)+%F6DP|(@~BFK z0=yJ_vUt1VhCZZeA?lPCOD63wF6_OyD@(k5+?#z1$$ zw@(S9#)p2lSf9jFG@jcJ-=k-|-(+CP-=;6=Q}!PT?oYf+7hC+2kL}fKygJuf9<_$y&1KHKjwe#7JZu!M|`W?ArxY&SAOew zo$c5eXi)Z|9x83wOCjAa4Bitm69wgU0>GW_=$I!w%pBO+nKuvx1&Hl@<_L2?etwtG zC>te-gl0`-_l=74i}MQu07lpN1QbKvmzoq*m40-n2c)nSo!?>^gMEb^;J0-S+wP@} zeZs{4Mc{|frIV1t(cTlqw+I}1Wg${g5WTRGc$|`cfxLJQV5*atf zenvt2mH6}It?pT25(FSjTL_)^DBB@76OD@~x+*v>$NLG6O;?-x_LswRR$qaMqf)UP z%}99vqb2&>0>A)Fo0HoM3R?LcRwSkr3vt-PHDI2{do31!HWH0pvfT*af&#E`S3gb!xp6k_O^4?~@g_X_)acAIFj z?4|Y`7>8rYql<68tPsT)3EuR}Rfjt3tM}M7(+Q=;=JKXq^0N;_|z=4^8Z0E+UiaUO(VQtl;g^jEU@9xg#ZJG zL5s@E4XB=^NU};NUANp4lZ)Us$NM(3NdFp6Dk^(A2jW!l;Ug=VYv7kd3S4>l#IBd{aKZDt7zrr9YuVoGS zs%xzzpEj4`d0Ne@GW)1R*NUgq#@Wo52L?3|#RLTQ3gyqlFI&TNW zdC*}&ti=f8D-rWMMB^(HZCtdcrPuO!BD73BTzjN1<1l`&>J(Xb{%3Om0SSD7EVK$& zxE<+)PJsiIXIFE`Mg^OKqxiBk`c}Ky*5;3)?m6ctKiT&=qH5rmC{~yy&-+tn!$f7a zqWtAGb2SN&(1!cLu;)&qIoLp`)Ws~uBJBH1j~}P!Y(GRjQVA=D;inX_1y-_^2UA6++x88AM+Y1ch9B-F zuXEFu1u8{C}YPKffn1_nL!t|qYhSx9L>2Rk0MjXPPrC_o@6KR>Mq-% 
zK~VzSj@(zbQPrboNT!=8xd9>5rA3t)=Fz5}){@LXTfc2pw0E_wNppcTE-}hvm=j_t zDiA$BYq9tK=HqkN;LE6-$^w!;L_M5IK0Tc}NMSg1AZaw4?g-tiwdYpIn>#UY_A&3| zvj}9Bi?;MVGrs3}V3}jrKduSJl-A4hAT!PNg-Z^SS+-5|ohbdAa>w^g69WwQnM^t@ z{5;TcmWbf5?Zajtdb-Nxi6k~Oyi5-KetyF z-As>Z%_gQGt;8QGK9%U7ZFnNGDD|Jw8?5~n_uGU()=Z96d-LKU4~~34TuAQ#lW~Ke z>{q@^S>Z(=N~e7LjN{v?RT1PvqNC-$9*_a5F%@`KK(U?ICs#o=6ojio*=gJ+6u<1m zi;zL>8*E@V8S&X(3M`ZGm@WINqWl7Hcx0cyq;~2F37-SPEOU0+4R>Fw?~qNjei80mWxcQZl_S+plppdr5hJZMQU?g-cbH$HT{S>yxOfH#s(hXr|*8m=iH``fcUCfV9Tf zS#J9E`d}|EJ4N%3g<{?4q?Qb-4taUo7413BCil5PKV@Z>%ggH3c3w|Vr&EC_hfS&| znumu65bea1`yHu4_si*PHR}eQY1Az63{PEQCHn^P{cY=3=O_@`q0q?5&0y5!c)<9Q zGm)5eBEXm(V3Zcw#u zzsVaX;i05Bz1k1zgqG^T!p2f^3|xO$f_gPz;~s_SYp{Kc5kD>n z;}dMq5IS24`ya|NE(%G0Wt8u8)1ZuHCl1?l@6Ti$Y5K2v;nlW>?xW_dSIEC%Mm>B4 zoxsW8;YmD@4cw7596#`s%gEyr=}c-PNvM6!R|vAXs8B2?GMcXU27C%*Z+L;p075Mf z^kM=We3z3LldRU}7w2$0AI-^7O}+*Ie4eJ>r(4eZ7;|_JPJC}4&O_)O4h2U!ZW3cI zqd(G@bXh8Q38xYmlU1#92Uyc+JoxE$4Y^3RqD2FxzLwMx;0#bnn@9_D7h*Q^so&jS z66&Mh9?la3j}31Lx=*4bvMNmhg(697@HPhNCAnY}HQ#jN9!6}8#Bh&AwE&?VR*Y8T z?!bqbcusBh#cLTO`jXtFR?d7&VPc0K&M?yfvFh)pMK=vi!iCT77bK`Zy{OXv^hFgj zwlQ|lcd~W(o17QPw#$B|NHd|rqGWuLkv+cz&Dr@A3c?qKB?3bTBSfHD%SweBPc8Dm zKe0ga`o;)m3J8MF)=o}Tnz$UEzwM*+P<&%>F*t*8Dy<%G>=9V(raTHWlK4hpiwZ*J zN)S7tCM7e41-n?8bEuyJb92+ZF;ncuS<|7wt&v3F!(QERJ@nm$sSa*Qc{3Y_?K1}r zh@`?Jd3WBz;K-2-Z_8zR?QgTQ?OX z^v4Y=BD`51#tiDRAlH$sJdLn`S1tNlPO2W8UN~t&J{HMe!O(4R#E;)e=&1jH>86B@ zlc4+Op=|qk{J&$McG7;ZC<+}ZB#Wg=4?VS|HZ zgOSjH_+RfF5pQPi-V*8*6Z|Nx$ZNCzRn|@?2fsY}2_xGdVf@Fr{QrnxQ52U=A3u8F zAsF9x0>v?GUqDP%d3A~-`TQ1s3jPK&jW=#;)1}o$gf2};*jOQvT8em59Hj~AEAxZX zmm5gE;1KvucrSmt2*MhO4RV~4KsRH`ipRS|rfqZmhVn{d?Jfydtucce?$JGsV03t$ z#3%TZl;u?mHK?vcLPwW$V(Nynh6E4Pj|pfc?P?jrzztoTLd|;Zy;%;Iz~OtTJw?2M z7h1yw_!|$|&n$g_>K;PdW%{C@&&?P61O`n~VPs*x-yzR9he7bbCwSC<1W)##UUMnq z|03Wi{y|G46hS^BvPWn^(FaKBP$mQJ_e1_FPX;M_qAv1#i`A$O_$#C#v&9b06`Edt zyf~q4`eTSEQ#OUvSHI@Pkf}@#$F5&oC5LAhHGG?0KZ#+}jPFPLw?mR5|1<JFmJZ zGgwk#!||{(5G+;gt;asEl|Pgeh)A!$;$NGRn}R;TI5~v0^e0GWpsqc$u7cz$sY>62 
zFpYASa^%T4b8;3O{W&X5%a=e28SL29L}Vql49#t=(U;A{GK>J*ujtGOL~@WHTZ)pA z<87j}HlwUmzqk-mks;{%=I=k%WZnWDC1pT&4*As)N?DVXS3S!UWDQzKX>s9}%xTw^ zOik!n(ZdXrnozGr_{_AtNOCs-Su$y?)LqF6fheo4?mc}$yH`}PJ3(sBRB1G4SL*G< zW~q|CEbf|wdx?I*eS9rmO|+M@%P`akD!VEIM5w9#4Z9Bk;;_XRwbjW^+8{iyB-3@3 zd9cl7cCW?2TMcb3US~*_3g2%~a}rJrQ3?BDX86a~YV+3EyIU9^Y06uVnc@b~p=U&0zQGtQu;t)2(v-{TR4u}_%i}-g3_URG~=_)11E~m<^361^$ zer(X-%ks{*ehf<QlrfOzzzFPfr*AAk{JBEXvk9h;;9m_ zBbiGiESA1BcyC=TLI~Xxzo@WYbR%;r$*H!n>puTGzjpxMb$mH89XGKGM6{5KxQ?L8 z#x>Zr5c%Pe22EYuT`c7lBhNb1`r92t=gTl3-v{6p^kQG=jsZAND*U(+dP*$9xG{uU zpINU6#8RKxU}E4geP1eK2ou7D0j)pv!C)P}K7;hC6oQkUgn9=p0oitNAh%G+B#q3% z&CfcPxfTYmG`~su24m@d7v=uZif=(+QW-wZUT-R%kpwLSz|h)`G4n7iMHNGu7W#Jx zbesjyd6`WnrLu$hQ1AQP5}LE=c^tdYt;2d1?sibHtY|TV z#5u!#t2nTea+Z$R?!lr%5t~z*%g#K*vvI@!?AI&PrAZ?GNsPx3-!di6Sau5CS&!Vi z8st$Am48yjM08NI0&egG8@4YUQnhuNS%9XkI-c?>LsT$=G6>5M6@wH!G6v z0*D%A4wFGIrNcy@sjLxoq_n=$Qp(n}DrlIqE;ge_b-vX^>(`C0kdj9FXmb}HXr^K^ ztVi;)Ol7o+NSL6TF}Gnx@%Tsw!mCDz*sQ(vm&h?$b=etKDiF=qGtk9mf+3J%X6M+6G!B340C` zMI5F1KCl&Y=_N-kgjW^Lt{9AxBY5?7u_uEHKA;MZxbwRVdHWEcRy8ZSY; zeAf&8vl4b-(I*g1$i6Yu!f&M-a3>TyihJ$lWpTyga+1PoBVB@fAevBL!8@hfUg*z> z=(P0$_yoKg;Z^|pfQ%6cj3KhdoQ0FHqbbK}6*2s9oFv(C5PT~xLPp;(BHb=>;%-ns zt)Bs{=T+y+nwen>{1J?mkp8l;;@PSM@)^vc-zf}i>yf`-ZOO;GyAhg>zjs&e&pGoUOSvh zB;hgQ;RrD>fa!zWxNoi%BQhNkBO;U{{3!WdP7jF>icVk1jEhK9=oy#gcfeUiK}A7H zLE-g`^o{gP^Z@Eo(bap2v4GHG4-CGP>6>mD?E?Q*AfPZq#~uGfisz3c?;pFM|BPZ$ zRP856h8H&NUDo5z-vc7g-A+UfCB#w02Nwp&7a2AusGZ1iHWk}mB7gMtMD8o*M|QGe z;3A4Tc+-_N{qoBUv=p=yiIP!bhp6{d)-^$Ce7kB3pyc(G2FY}RYqxDNN8 z=!pu58QjnqR&X5)E5ka*|3eXhs+z!Q`GNHTZW~mtE(~gjjZf_=duUzjTYIbYcQ44~K1tp(7A3ei&t$-GKf_6$Syl*8Hb*@^Bl6f+m>s!^~L z6s1S&ZOv1o^MtaqdMkrDV4$GKQ0xN;c)QKDn4HGHv)vXo$|yJtV`xGi1Y5Uc6CEv& zVY??<29uoUH#A2=3VC-aZ_G*=x9@R8e_xM?ZgvquThjKbKY@mUgIcHca9L;}z72cM zGcWk&-dl<+Mm{m04Y(JIT05$gD?ZgA!#0hNIE{+5vx4-?C{7@Yyhh3Nelnv;{kCZ_ zu->3jV^2f!vGP4<%&2@M*zRyIvkhtVy95haeB+bwvDVCY?(O!y)ch2R{ApzgV$+fB zWTQ1*oxNHTGO~ec-?n*!w0Ro*|2kdntmiIB&tN765?WMj>ha* 
z0_+60Xm4tVvtfW}9SWVE2y6&q3|Ll|UzMhsr+wS%eT+a{G*QhQisF{+(?3x9LUzGH z*+Gj&@W+3Aupi}{YK$Atxdbt}cM?-pYc#PSv#Vah_V4N3TlyOPxH>YdFQ^S_tct@j zQ>N_GF&%jyH_JKcck6%B8+RyM(sq-=+|1G;AF zkiPzs4)9K*{EnjexMX&?nH%AQHQ))71QGY*Vqw#ahsEN*{gMTG1{0jg1}okEvd|qda>8TA%Z_QgA}|pH zzb;@&6o5wo+6g2OnSmV8=C^Ev)DV3wG9;&SFa)E%%u(u4?iOg!=`{| z*L2kt%1a|HR7-=8pq2T~t8^?&Gl|g>Zy)3%tnHQQ`H>ENr+Wwc14e3(i_{e44nfxZ zhh@(0RfzoOs%v)P?on@{hz2ys|?`kwdIvQZMLjR_4Kzc#K-xh;heEwETR;F0RKEQ2p~DgnkJ@{WkAt< zvGNa#bsL*G>$&x|6@Dgsz5zi3SaO<~VVvr2DyF_X+0+jq9;fUr=V{sZu=0>`z&@+W~w$r*QRw8_U z(WtG_Rc<)iNX@Y%u&RCt8Joy8;VAGxy+1fxyp7zLlY9IeHQQJ0a1UV8Zk5_gTu|oB=W%`Xg+swPyTQ6jJq3{xtr$lJkC01CCeazAMn1g^ z-{@9eepYmGKGR=D6uZL}=MIj59;QAR^yKcRmLfNUfc&g#@1lDI0bs)!?Jyg-CCNNr zCd<9{65+&{+B}xT|dcd#>w;!2tNBIuRt>T1o2ds=}4kQi+3U+#UD*HlAWh4Mf zaBMv;mNIm{YTp}=Kuq>(jdG8W$++@(j^%ldn_e&@a3#G)-|AaY_|F- z->>k@QLJwsq2Yb()yAdz4O%58CYAa#>h%*wHkBuQ;diuvW>zBx14V(rIR$ql5HTBS zbi9fB=p7_TMpPOq$TLMNguy|f7iTl4Yf_`PcfG?P1AA5dK@#6nWcTOYqRdW zwkz(H`Qx;LDcN{Y`B~;D$T%`B3e1fWWi=3B!j?!nq?oDSQ#L3EjsdHRRiMy415<{( z0U~Nr{mHs)AU7VLDIRx{HYgtHW~u|7{X-i3Iw9z|A{p;dQEQ3FO~n*OqvKpKK^+;F zVqs%iV(EmUYeEy@TiEIP<ei__Nx4>ic3m3Kp)-gSrVM; zk#EOIel$u@m%_kTL%-5POp{z+vdeA}kD0@Jh1%EoU>|BtXvuNaCMM$%jSMOD@(KiI zeYUYvG}YBFk51)f$mE7(4dcH+qqh6*oFyH0ApWYYx#X?l(^Ioc*B*3d4VqqQ zUKVy>!@oo$jK3vol480g8DFN}r3k~tkPGNR9h{>sIDi}JPN4a!9nqwK!yT~N3SAUR zn4Odt{-e%ZSqGrCy(181qaGX>i){N&$M2i@Mv4ew&KW+k3!=h(wPgrgWt{?HFlR~? 
zuI0K9eY)K(Z*b3FNzQ$EyL0GI0fxgrR)_y_*O8oog|XphuKn-te{7T~lKZ>6UTJM$ zpjp&WP0>#0S)+2y-BD;A{P#M*)gP)EUJP%h- zckcjO*g$w(y!YVWD#@?Ix7^%PYEcR}5}=-AP5e`Kn;aO)z<2fq>t-#suk{&m$YwNi zq>EFm#To?KEu?_;D=QCnzNNLdhA@$|E@+Ni2dAZYsB-IME|TKC05ho7Bz`z?n`fvh zcO)@0JzFs&DpOLyc%VA6+3LxNvD1dsAJt_}7}zHp10`Zj#o_^Xnh^u5D3JQ1#oJ@+>1M$l z#lc?OExg_p^+UFUyq-Mmky<@}VbF{?bs}>9Rpom!3)x+SfQDsaP#r#Y~ zvJ-#AUhRJx`+tw6|0DRg+>c-U?{@M-2%C`8{Y#aFF+0XsD{BcZg4cy7D%>vtJjw0n ze8Cbj9G?z9Qhi+RPhWPCdN@F7U9`>+zHyJw#@lgg5x+*&CyuAC*B~P7?~-z~lDLgK zQ@iV!>A&c+V}S)|@T@^;Cj1G8AAr?%F67?2R7H&`bH&$|#>q3ud3N^|zl43XPzDFerPAl0& z(g*f#_MbO@I7SNLJISLsLwr5b+^nw#8g6TItnE~QYghGB#=!F$e)+4Ttaj%9=(puh z{Sp0t+?x0Q`Qpg-%J%UiXNLS(4euEofV-5Zv$^GuL_`84z$C!M*O_#wCzM&Yc4-Io zo(#a-5<_T?b^Bu5HQtdnG2yX!I!*UQ?I@5Pa)yxEO<|9!Pl9pk5zLfB&GA0bL@}}X zP)Lwjdjf?nD=rW}?T*c0_H^@X?bkhtAos!)IEUt5*OpUmlHHKF%7)_$>-ONyod^o# z3@JC{+tfhK?qG}4;9i|k8cWK0wUH#3@l%D7&wQpe-PEmodqdm8DoQ$>qy*DQYvU~9 zr+_fxw-0xGe0Yn};FCb0c^+FgRiUjF7byFdUhrXDL*+E|N&M%(z-|#Krn&e8JME8P z|KnQL{{j2opO=51oRX+;A$kO$-5C-<6KMc1lE;+SYG_G1re6$ZM>-%luW7vmdL=(D z1Q9r$YUg;+n%edD?)3T6BFrYrHAIj%SW&Kp5MAHy-)*nzz+wtlF9_-Cn2Z!ZYEa#B zBZ`=B#kq`K|GxE2&AZ)$un_4yDz-ob15h?t6T&u9*=FK=oTmo2X3A12lBiGpNR6}$ zMQ&(XRhkxus6l zC6OKfZP7d_Vp3F;SV(+5BJ1okAPkby;9&?r`T+Kp!K3QtC(=xF1lPYu)Of?$g9boa zZfvG_*beQNGHPtUu>H1ZdqK17{1LUldZWZEl241K$e3WF7<(Bv{j})MG3F_8!PtrC zHN#Jf#u>wdl?!w$&qBDik9+aG@l)-?AX0f6IDbh+qia@6q8OmiC%*+l>JC@iJ)k1x zlSoNT$GeLI^&Z(dMn}n%bqaY$#$k-}= zYEtws0YUnsBw0{Jodc@hil7aE*ZILKtdbUa9Zx~W#KF2xFf;v1Th!Ixp|jVQ)_Meb zRVaR#2^1Nh8l2$ac-UY%oO*lt^?>)KMq!*NXqbceYCvG2*AZ+D+L$VG&yOrJNiz%P z1^Ns~sJ0{trWKM0SC~-SGO8=f$F{?81T0++>?d4f-WY&TF;Nn9J9OD|?zY0wahV=@ zR3W0+i~-@N%pCB*-jGAF8#+cGSyqg#nB#j;JAfL~gBv57_wxOs-TKQs4N)VPj%JFk z;Tx+4*6GNj0sih%IkPVw29fe(_F9CH_i-mjC>9jCL5VL6GIodF(aX2(=ZaN1(sZr% z9ZILe#SV+czzU~A>JRFlEdDQs3}4}Uw<_iJOB3lkeZNvszx~pFflhLM$B4gseBu1q z1^Zv5y;F3iQM;~L72CGWif!AroqVw?wr$(CZQDl0b~J0%jlR9jT7t!S@yvqc87AXtxLubWoxew4*ALKM4QXzYub8%HF+&9W_l>DGH{0 znwZr{cVehoI&+mHelyd4V?Mbtx7oRjWs_g1!jLw!XjPL$ 
z9#mL~jc)H(3oPU|Nup(z!=eLjio-DYN}WoG!av2LiB^>?#6@s3uZJb{oey9fRNoUa zQ5*MKWQf=9d+!si>6R?d`_G&luiyIAcnq`X!<-LN+JV1eZIci%V2oI77Y;A6NLW@& z`f#x}DIGAcV3e-ty4p9M32e@mc))^5R1H`8C849ui{~rm82VEEZW+#JYgBNj*6eow z`ZfNw>(53pmn*DCzEx&TplYoiKfHE;9thdD4a(5Sah-R@2O#5@MY#X)%-T+ zo;16=U##vk*E8crA1nF6;RYyYeo~Xz00H&tnTowg|5)jE|6nZu{9=!pLnjH7z9qti z2wdA!vW?>i%1O=C)o3lz%w5BGTNZaW+SSs|?Ty_E2w3W<7$sCCa8nT}5`9RbX_i+&#m)S^@}S z9wM4^^fnhdUiBSp?BL}|wbW#M%GhXc$*Ev#g{ZMZ)O|NxK)aej+e}=)JY6o;A~DEl zis#oNm0)u~b;(eVfhEJ5`w4s%KuaL27H$~ZNYjr1bAY8AB13-eXNg-d##8XarlM(A zk?QTC&#V-ekpDM0PKmynQwB zNoG5Ur@Jh*&;`U|rk3z5I<$hOMW@$E=5LXNIT7QDyQO06Iy>a_@KLi{9Zc$O`@*68 z#tr^ZK+JI7%E4O!cMrD2LW}n`EO{NAPx#rHHb0YHnb5T3M2+mRiDczS3_ zx>-T$of(W>tF)%q(`Hvu`TVVQhEh?z0Pz};fYcUh#o-O-1L^U?P~=s-tDRcd(VgcD zAov)RRq#dY_HK;3ND~u(nBIi~e2i_oSH`eR6ZP^aI+lhFrOOhD&$Ii*E(td;lNt=t zNYM*zLL*KPe#d?@K#tj^b{=dKKHRdN$sZbVZzHRd<_v1SqOeYs%u(g^!M)Nvjh-;j z<%m(6{F86`+jN6ze*aYi>@#8lwk{>0GxKndM+rvSZ6wYq)1%o?DjS-;n(2zhgqSgs zk{oT`;R`o${u!A!lqx{%&WpQJx%A+gsJTx&acAt5$(fB7;Qwb0i2w?Bs6cqnSn9?E zCmMu!7!ky5cln*TZ$Uj-deQ*en@+E3-&we=RKgbCJ1B|XP{jLGsxtE^V*^H2JzZVK zmkGK+l|(#X7=czavH(KFe7ce7_jjx;egj)Wtx`$zF^FXt(O7m{P$`{z(~@`?3w|Q)+?6f5n^SE7-Etg!+_mB8b^S{D=Xj*mz!>92>K2qdi41= z`s3I2SjbIdo6-*eHkIz>I-3wpoiRj z5JJ1E42iGMyWQ(Roz?~0Af-Ng zb$6*~OWk>=GN*K5%buc?4j(ji9Q}hkQPI$nXcdDYW@A$+e^!LbTfF@1JHM@;ckZew zEH8$p@VY@$JY7?B_$EMD>&?*-^`tuESC7SI5%Gn$^!{sk8X=TZCk6ZUivaFFQAPjb zy`eI|#6-~I|29NSsY5y`ueg6*b+{gPx!;qJ`27t*2-Gnbg~oJR!O2m1 z-y0K(lMhh8&g+7uGQ6Xy9^e-g>dIsrKac^e((tv6Lbv>D1$I4O4evNSx5a(l)phsC z3Ae%9jIkTbvio-$0Dc$*-t_{97eC;F3xCiNzxGU$cRjI(2#+^J=3^u!6`_rofv+E- zeHlAobT8#$@2u>2u1kX>s2RRZ;D!UJq9ZF8^mv&V`d>v$Qn5 zsjZ}j4{z6a-n=T89M6gxCO@TV7~5V(Zm1<`$+{R~^gswDd4$`hX1k=3$9`$jq*tnr z8?Mnb-K2Oz;#~D893z697sxWBDbYVo*(&=6uP4n*LeJ=i`*yUaQW56onv`VD_SNS< z?`|cCE#%iOZ>*v(@Q77G#(3)?VZX(lMnYLor{c%L8U(=l@|7$%#4l=WWbews(`n%) z0Va{RhuvGaf}>Zd%bC|4`YnkJNX`;~8JKssbX%cC1y|uN+D8bh>+TMyAhyuS3O;seWns4|VQMJ<3^0oy_){xkgOf0)xyO7ZPSOv3I=D0(LYBbp#;Nx&;gkI#@`S9 
zHr_Ag_8AP#2vQ`&FM+*i9~HRd79LD@$ckzj!@OSuxaPnNCSaXIJ;SQ<5fbRkEteZ5 zT8l}mhjJUH(~k|dGsp;RdzcH%C)$Oe~GvfqY zvk(|>ZwT0z1XK&q5)4yK1Y429_gb$V$w15vJ=n29w_gThj=}JXDFZEzf&7Ll+bW?( z4#bKx0d{G`a%B{_WcfjudMR05?RI&KC3Ul~rRqq2+|4DLiO2IaUSkD@(D&1px;F`; zt{Etm+DN55twaI=BWAEumx2y{wNxOn-~5w&ZuuUF!`RCFO5R)-&p+|hQ?imo6P>IG zxKriPaDx=Sat#g^xWpu3OX~G#ZO|f@t%-BtWT!$vT72$fhnRp^CcuSK#dD}iSgTH_ zHZ1+`GI$%P&h^B6)zHT31VECjI#p>&I7^3TOK}o{7G~a{I|-r)XFh^N{GcqX{zdcl zaZcQ(^N$DWPPi})js}auIfZHhZCM?m)=d*y6DWrcVPvWn_PLPrZN+FK5l)^zGr9#}etQIjOR#PS2y&b*e`V?d92|QFzWE*_aU`MzZ#edH6!Q z=n+l0lHLQiiXBN+gl|D-4>Hu;Z5yc3DMA0lfh0GhRB042X-N|*<7*9E4AMA>g4?p- zOGS#{I)WP`7i+L_llwl8igI^HL!GZ!MFw4-9%|Gal$Mh1VB-8|eA}k?Iqf@byBOCD zyeHSiu~tP5q!{CU)+38{0GU3*TYn81A`9q256)6hQLQw0h^_FawodVrJlhonE4gr84J_if^=AfF8vhTztxC0_r<}9I1mbf_rTO*hkh)=6J(=(fY8kgR)CKO!5C&x zq&do+X#9zI#!K46FYSN`&s@hW-DE@Aa}&zCT&0I(OJ$jD=oCOYDi7&d=nWD>fg7Srl(!=b+ z#Oc(iY4`=QBJ<2_bHuTv9`d-iSl-n0YfOK`14d(cuDSmRO1IJMr&HlVv95{>+P2^~ zqg%qMjxd{d$R2IJog7knCJW!z^NV>A-wN5J`5P(Rb_&QOxypMU+jWf32lmbVZ;T0W zLnQn_OfLw@SNX(SgVNmro4k-dXV6dO#5jR*%~hm-diV3hKxiy6DD zk6@T0Dgt)MfIDgWEe?nn#S7W~A*@h)-Rd_I<)A~f_zT@t<^=hphI-=(_T|~*1i%A< z)MP$QdK9l0T=30L;H!IT|4BhSeeWM1Cf3`a)`XM;TivivFGRRIQuD)>U6XDU@}Dg3 zsgwiUZlDu4h!gjw%Z7o`w#X|gs*?H%{R^nzzh#9xCTF9P-gs65h<`?704q<1q(;;2 ze~v72LA!c(Wh<`f|Atb|a_S}GJxLcKyF5Y`AYVJZwVDe7f1XMNOCVrzML{r)?a#HF6+xS{64px~_|t>kNbFfdatKA7Qd5GXF62^EfTJFQ$Q`c0ZMtE{t*z*3AofP|evM|) zS8hrqmaw?GbSpokNCkAq7hq4_ofdA9#H<*$?p-OpuUALG9quQPa%SnDME#`iWKcbd z3O8y95rmoLiOpClE%>}kDj^Bd(!jh`5|3fh9&IQ-J=;M64EVe=QYVITLE>&5k3YSt zjolass5YLs%5RWQLv{&*CAmhRQbiU|hfFn}i54l28{Bo}-*Xi_(3;EI5m9i17rlFz zpKLJznj&SM*?l%b`({nP2bZ714;*SSqTQ-F*=eFp2ET;*qe4Sml~SePkWqc8C&-q> z-Ib|c$$y$Yy4!(;>7&nA>Vg*N??QL)Q~$K!LqOwm{81BnqD4-idVlV z1i1VgboDpLHXvw2nN;(XRnv_tH^{ais2O_Vbaexgwv)1P7I5YU1&tNY+9whWi;j6| zA7?NSM7Y;)E+5WL`{i>2#34H{C;gM{4VX@3sywgw@Qq33~zc(V83$les=2_s3l#&>l4yzrUp7lB`svWccaX(VxGXEoe zr`7DF#ZRUX+_@P&wWu|ftL_D4+nsUa4*#a)%NBb*z~J+X_lsosHbA{MNAM@+VF`s8 
z`z59>>l-BIrlc<5E#2q?Gjn%lpLCm_-5bRY>C-WB>f{yDt6z_w$v2MjgY@`_Tmy}7 z$m3Q`UC$>S_Nb08bO>svc*>brFE=(~dE&{xs+z*p7sRfKCn z&Pd#~~7c$#i;vB+LYr32axK zapQ*BC(*8k>lwFP&deXLpAswf5wbA<+8x>%A@70Qx!B06cP<%EmVon#>8w_u=jY2i zMLZs^h`ZdYEXK0FBOGOjSd?)w<4?Ed2DXJX#+QB7cL#E_nf4j%$(YhDpYmg zg^D*XSy7yz)a{sVM`B(vgk|w^;bBJlCVz1z>fm}c>wb^PSeHC zx)9Ui(#58%iHpG@a8&ms)z%4cMh=cEJ7Sfy0l!G8dL#{!Lwe~*!`m(nk1`(om?{2B zS(F{wxDT>)on`JR0H~F1?X8P91XNyHmNc}UL@Yh69+fTmtCjIk6}jAx@G9X9W8}!_ z*!15lBNWU}UO-ILxt5B%U*O6Vi@nznx!`4#jo1qy+ZxOFq|h>_3X1XV4e=Z3r;ZB8 zodz=!K_`f?N(#drhh3E-@F1YIsndHH7MuoE<+C+IBEv^?uM*zXi9HA@K8OWi{1s({ zC>hf4qzm4+VT8Gk0P-RUK0sCooe6?Z3CwX&7=&T;e?%eH=*AcV3!SsB#h4I{z+QDz zm?VpEuujLAF%5&XZcZH?A4F*u-yBU2FRK(ytmR~kvO!^5Rb@=zp)sw)HDvYFPORTD zr1MmrG#VN7auKA_HyfO~pskDbrWjx2Q0V*8QLdL8A_@v44cFStHx4b5%x+a_6QEHd z3*ShzAS$FmSduI%RY(rkGzqFrOfoD2l&Dv2QumJ=8AjF1RjR;+Igc+8O@|nT6H++ujJ`Mt0Ws*BcL7Nf6m8%p4DNoi1$~J+h08r z+@mMq*Ed1?gx$i{CliAM+;rU|Y!TP@m-~oc#J3bXv@SQm`;6Hre1|ikBd>Bp$ejuo zdoqUf-)*ZSdIYcJqv42er zvkk800vRu&KSW{JzMpXCxKza?0$?ox0qMMm8A0ZS!Krxrs|ZXOC24Ucapai-NJT(Eseqs~(T)RA5miwT zB3M%MHd^#2tkDspv}o0Dk=_JE&Xx9!iX{r~FyF$z8b-P%7Y+4m{Sz{~+RSpC?sB|# z^7(qbL+u5@n}8ss3yvcIY9-xkj#Q15#7MBX50G!7MOzP=k3Uk(O+wHwY(; zrAZg@3^rg*pPL|6q0eYH(%5Xw(ULXLI?`%x*R2~SCvUS%#%;*&+CA5HUdUI~@GQ4E zEJ3Q{7W${{9>f&LB$7bSQ3TWi(Cj0SW6;uOVcDn~m1QAIoGe0a^D-Ppg*m3NNzg*I zWu*AQXO}bcRDMKiv9{8tt>YI^x2AROduGc`GBPa8jw;@&>L=}&q=z-u7$O;Y#5I_3 za%-FO7k@}wP@-7SdilH{kw(|kzv8>yQODU-ay3cLArmMt+QYt;r;+gF$LBC?Ow7@- zPhfS4G&N0z1aL}nESTGotw|oK)3SMSWxil`Cco)D7m%dd$$d!nRz-xe*s|hZlxZ>~ zYOny!7wVjP6yfR5*#hiTyHL}4QZU|7#R9c{nwyjZyA|mImqz!mv{`l#9(8P#dxQfC z$1HLFm?S9N{Rjj4v=v2w<++tkQ4YjDf?yPFLB0V&N6X>b)yjE>hfEE$;OVz${n$Yf z@q3%#>V?n#StL|nlrd4kd*;)|v3XTi7Nrx6f)qj!{QlEIa58>AOCZf0@_$fMyn9hG zIDfrKB5WHwB7TosJ;YuPfTAy)7*uBuzF1;txFxaOP}}in2X_CQF`B9C*4k9!6t!K09ZuwKl;$ulgX;Mpugn}wQhBs1~N z;_isaV-yl32)?(lZwPIONA*N(D^cW}Qe6<`Ul{POxw5SH`VbQ-1d9o-h?}=kEy~SB zynGJUTrA=)ofwIq&=h$Ecs<5OdyG<#pFubwE4W7Ir5r=N3duXf?L*FbrF9yl*MR%` 
z{x>;OF5W`^`18<7`X34P|8arPe-Amw|3jXmqVbbnh4N)v1_Lvoj;Nvi3pKB?7lMke z3Sv2E{OK3#96D1XH5?q13EE&Q?+Y|D*&Dvs>>&Tokn_6fc>#}9n8|_*ErKUwnR>|d zJn=g5GHY|~{`z=<>?NrGqfBC{5K|bb`yg?ef+&&?H^NBp1!j`jE0LEK&p;wHZ1J0e zc&{=rF_2lvdA%0KF4cgUH?Ae6vD4jBq%F1hL0ZqqD-#pYR`BJOeC&Ma@ zwt#V~i>VA9V5Z2!5o|u3eFD=#4ih2wD&@GDwsa!BUY5m(MzeK~JKvB5DmfEfV(M77 zN1zYue(N*8ppt2UU& zL{huddbbt=nq+3ZGj>lMjFFN2=jshqQvk*j1?Ip%5D~P%+_Y5{aV*pJjk|*d?$Ni( zLLEh0ShSB+JK0{OPlcJ_V*V>8%yo^cyQe7oq;Y zRhG5)-%BtxbMYiHhU!97|MYgmYl;GQKy-=yR#*izz5kZJAuv87YA%hOD(2*>d58Nv zVrq^R!^Po;O00h4hERO`#(`nFDBKl>8gHJplY-!bzegEmCDIjk33Ok!=I^eHZl577 zO(87LlE)QL`2u~U62=XS3U5F`J42A@1E?9|N>X;AO6EbFi*m>EoD#L=JgL0d;zcaa zsx_cvsdQx@ax{MZHyCoL%%;ltvosq0XcYhd>&XRwxFI7u8+xPv$NH@Ya5i+OH*s^K zw{!$Bva*`c|0gy0|Du7Gx*)w!M9}!YbY)!Co~7mfaF7*B0PVqX9LfU)3dJLk#Qp+c z2@{e5g9!R_v<^Yr>A0f8-*l%w4g(`7+=5&?ZaJ^KZ!bNnJFD@>f9R?_OPltCXbgO} zeE)=N*Tj^oU3Zngb*t4>jG$}bdjtInu3y8Uw-4|`4<8WOx39zgbBh$60_Bm${tSQi1%{X>_)>N@B}1;r&_u!#?Mnnj?-)=VgOq320aKz&CyF@ zRQ$n&EpY?PLj*@xr3?&03E(BO*}C4latq5g*~>O`fPdWoSqYhtFjI$t&Z4+_n_;L5Xb< z@+}QJ8Tzda8!Ips_9ZA<59-a;;RSr8Ca|v?&H8KX78@>yawG;>vS!F`>jl?`9_|}( z0RK|J{y}mCk9oSYe}j+vMMd+%G~$i(?8x1Fiw@FISY2lD7|FkIk=`rWPE%C@&5W|5 z%E}6cg}B*3ZIuBW^~Cgi$X`k@5&+TZX4vLE=f6u^Wksba6O#7mw7VL6e2vIG{cy5VHnCBp#>!lRu-*o3Z-z!=yX&7utwa$8)EB9=|u6 zT|M8;+nN zCaIE4e$#YycBB1|{P{GdQgtQLN0k2AdeS6nDsYycQYpCd@|p@uTl3s24B=)CRV9{E z`S>%xpx^CUs1(-brP@5<7*nMh%4%zioBS}UUn?-8DJo0!5fofKd9RI20YDY$V6+z2 z81Xsy7|Lop+AHd93=F00+UABP2ZmU94PpItX!2ho`Y7!5&7OMYN| zxpk&8xvgc%kxk07MuGFyW6RNCgI|LMLsk!yV+E$u$(&ZyBri@Hvo0|I+H)^Ei9nWs z;ev^zudv?58OVm2qVZb99k6Y1h$gb_0V|sJ2vMoBJe-+CgVG30zCO2;XD0V%GT3_T z4S@)TFNo6o{#U*E`~!jM{5Fr3M@vaEfDdRGib6Qeq{p2A{V*`z% z=6c&X0xpcCOLsG76;)bNLXz^?^w{M%h?LLkNem@PxhM#ub1Ex2gsXaA)kwvV?R;m- zssM{U!Bfm~zO81m{z44y=tJ2`m(-Mo7YM=wBaGjy7iAHTaH4uH>8?X4@IGd`q7my*6f1&Qp=~+Rn8ID z+2{rkK#vHEVht$cL;5>*VC;oqg=p0jdikemvtkE#xFEoo1W8H4PJHZ+e}D)pumpdN zfIB>#NZE9)PZ9kg^V7;>SOjLbUsIEZM06jrE?;6wgJ@)g-ABagADo<+S%iY%!gium 
zEoLKF8UcD8i!+Oi?1Q8Ba_>$(oz^FIId0XGy7tmO|B~Xou?Ig*h55WGB|pPU_V0Iv$c1K{ zH@QzftWn`}gy4?-b$-$*J)6eVUL(YLF4-5HeiCrG((IqHEif!7eB{J%4n$4P^%^p! zp`-01Y(k(KldWw};UC50VG~+pAq*_?_-Gu#$?-H^-`*Y{u4p1dUXIfx3ReJKV?m?Q z+-cDS!lDUBVGvk=v8=7y=H)K4m2Vgq70*UK=MZ-~Wm_l6fd$bRTXOQp?oX3}m%f3C zBW=!z-kic@K@{QA(%SQ<&af?jHeJ%0S>M*;FLJTkA;H*C~UOkxR^ z)&j7pcM}(N=1+TDoCRs3=Wujg4{mBcOys>X^e^<;kV#Q`0K5nR|sI(TDP$)tPSj!8ebG4JM$=&D} zF|l_}t{$voa&{^y;0j3f%F=XF=c$Q7?9}u(POABW1~dP-ESYLp_OrEt#3q(d&mK?q*i`BN%m86BU)*y1MJ#z5qnJ z$@6&77zoQ4h8CoFtI$}hp>NPOIO-vJ!XV!E@@2?K*Wt)L(~AJQfZPj$o@^wBZ#^NVaxQosvF;a4fEC`)nF2^BPuxknz+fBK_gROI1=kzNOp?^CI#{QU-L2UtMCD3N z_WpDv;GOgvyO)?T(S8BW&}ke2Lj${gaQr=>X((yd@Tg;&Ib$RHR(bzmFGUz(=Cxbv zO8J27XY-bi#N+jKi${P0CK&zna9XAO>N)gFd;`(k$xy$CxAaAXaiLQ^6x^USa>J;i zHTQ%7#@kL0jMynIqb=kqn#wX7D~%-@FB?r{80D%Avuwj#U;zq&gQ{UY zC>)XW={|yEvp+;~5m^R_HBVYF8k)j4u~03q`GTi%ECWL|+U;%~Q^l1gZp}%_{FYaze;8mWVcdj|q|`=7ApIIL5ms1CW#{}3 zX5nEa1*z~2$+oYzr&r@@6B~l-T z*R4VnXXWG3%{oHy;ymQ|pgbFo8Ox-$j4o*Nl1HWW+*n!Io@_>HjLec})dHT4^*aCd zq9t9Xyn|hBT%4%_pFD|BT*Kr-rB(_pb~M&mcrV{2I#`qQmv<#D;f#*=gWr-^ui)Qh z+?50OdKha$V^^<=)^+}HwK02qMTU5p(yL9M%Cz={MtoiB1GCu;}sCsAllWb>JP%| zgdM6%E1T|jE^+l-1NWUiWt7Zgv~id6Eu55}ifDQMoW;!h;PX7B3y~BXIZM(M+>e(p zWLOPu8y4kt>%Pnvm`cKrEo>*W3^fkTS^PV7_SJUIWt~MaNB2}*x#L9qvwppA1)LN$ zJ>kw6L{)M0RVnd=78JY`L``j#K7L-2Omz5N9F?D+^s|JA0qV;%is~^7s=Lcf_bgwD zB&xK#J6)WWap=jrx8dR_71~WxBF=6eq&A}S_pFbYLY;PWfKFa_D-(n9^Y>SUfzV- zzaV(w^n3+<8@1u}eBs(hUFY0?oYAf?;9KwE)gFi@`$|0*jhDH+kn5`L{4eRzSPJ?j zoG?FtlP~ZN=h7d6yzgHR7mb^pm%Z0#^8c7imR*(qdA_75Rlqx^ybK2`RbA%tAyglt z{izMgtrL+Q!Y|#Axk2VZM-5Rl8bniUcYP+U{&&T+@^kM>!RcDVMLyT{m2J*D4#$ur zMxbzrmK9vahy@MxfTf`0)Zi_>qd@6N|X~_TTRD3r^nM_s0yrLlp>Hn8Z&F?Q&w&$gCUC6BlS zr5*i4Lw9Kl(ZNSTH|lR|@%)d5ZLO>KV4O|oRd?cG9j=RZir!Mo3p$bVwfN|knj`td z;Iskv4Q)`v!BamK^_k&;I<&-AcCul=jQW(KgJ>$e!~iRW%N6@!16o()rqNvbIi(VO z5>n91Z-%4AtK;*S?B=~I!#)b*Z7jA4w?3eiCNv^YI}KD5LGBit8R}zPhFMhnPB{-z zwm4(^F~fZL;xA4FbXbd!EdsdKrd_C+nN3Ki%<4oEJ#&QoDHQ3B(Jc^tJdv&^+G|oq 
zo~ZUL#Qmk8(h%hu`8J+Gvu06qF>hI!+;+P8tGH6lEVBtc3WSjR5P~!U*qx?>YCo$u ztiSc9WG)9rVcU6AhD;U3&6N!o7X%$4y~IB@@$D%FHEr7f;|L|c654Lu=(^4skUphY z@kPl^+`v6hi8<#t;YA6TbCL;8pZsLsmZZjwYeZ94wl#=@)U=D$JO3ivqPdP=&2bGt zZ=RC!KjDNid`O>&&k~#7S+rw10N zNAfPkoLRC>Dk$AzEcFvwVnr(~IAo^#k(ct+$zBG@SlV^+Uk0Rt*@KT!^kz3qbEr&C@wd9pj2x$;F7iU$b)gM?= zux{i`Pbxo9Pt&A`1&Il`s$xz!+O)Iq%Vu7ygH8TBlYIh5wi+|U^zMYOU<_mkf!&@2 zaB_bVfa3Zk3SjdSxy@Pv>(lQMPDC4R$|1)K{6ZRe{Oo-a62yK8X&v}OU|c{3|BIf8 zD0b2}b)Vo{p)b?}XsEmED=|LV2&=OpYDDFpJe>|i1#}ScGOp zY@EK)dZF|E6axEDow05bUUBgc589EGmiSex`iWHqgE$iwmPX{3v}4>YVNoch^eQ)@ z9hXh$hiQVBR6NcW(uJ^C0%z8+_KR2|YnQPOG@V1WO6WkkmRjxMIpc21Js{?HEHdp% zIP-9qWJ~C~!1=9#0oC8{c|QWZsCOZBE6@l31a!|9>jvXJx?+^*Mu(l_+Xs4(Mys$H z#(AL1D%T8;oj0+oYy~*d|kFVeoi zV>#@U`wrqSaozHr>%c5_q!n+^z&KM&$>Zy9ErM&Lzs17%b%R!*B7tBvx@qYnj8mAS zrS5T1EEtyO$f0}+&?WIgskpdUpJ zd{hEG_P~K)#A*)4HG(}qM`;l(qg7rT_2h3<8@OJJG*CvDL2aGUISitbBP5zFq7s+k zs=fg`Bg+&Qq8()Dn$TRd7{3mN)mwyPIUad=Mt%EA^_6q^hz;s9q|RiQ4gHVFrT|y$ zRox9ekG@#g2p^r*qD8bJ%6nTLMC{I(>$mm?-su^Ly+bp%gCZW=P*E{I4}G2j1rRZ< zmsqgJic7S|oP(x+=mHYSIqA=bglrax26^Cs<8%JVG{*Lmhv z)`dvj!y%(mBay`FX@j{!LeTdTg?}R^LE*JMz5kf&PQvPI8e`D+cqt`msICxmkzxzcB#yzPiU4|>G&F6q)|ENcb5 z5Uwy@ai@$oFz)hhiM=s!+B#1?pa#D*eCTkp$tIw=KvQp+>nK^6bHY-){YZq7& zs%AT_o6tt4m-g30H2-uc-#0{QLTyw!8?jlI{i(!;}>s9Lwn^2#ve%hDR5a#YJOF=taE_##<8 z3)|}~3SnBu?{E~7(nMIe7dw|{;sDaxSUJ%3 z~W>A>V`MggE*2c~S-j2F2EHrzCFLAOLz z+k#*6L^s6l-3U)a0-bza?q;GvXR=WHSiRS`1~i0r&GI#Gk+Bna%;ivBEZf%I2Mx8> zDg`!#LRUR`hA%`{c(c)7jwPF@W~xs~we3Ik--s{3p`}HEY`CjSj(V0xKj$tBZ3~fJ zI|!Uqe6Wuo503f83nwQ3>-Qva?H+&Ta{j27g;Jn4VdFrUi?LXYF_tr_$fS;bmJfPn zf2kGkdmW$?hHXQjf^n!ttR|-idT{M1yw~Rl4SjWX1K%cnqw^v;)3J|fjXA^U^~is8 zYtOarf!2YNF&;937%#4U*12`cI>9<7!au;;!y>lmq!!q8^_srUh2sDByL}m%Ta#?i zqKiP|A8_(Ax`)-Le&I6yc;j#RJd-|dT5QWk9QEoWfd&_t%sg0s=%&6KE*pqVk!QSI zPp-4=2B&M0_UAGe8m~OqfSM++7gcPy?|D)F3RWbUs$U6>3aR-~!%j9drWPYc(X4P{ zmO<>Q)cs+ajMMYO2NIf)*_ATAe_0`|n#R=SoKWYQG%>S_k`CP0Q99+h5b5VkcU9JD zyo58NSR*v_n*T-IhU7)@yv%+cDy};Iij_0T}I$f}&7SZaZZ4m!e(3%D` 
zky_U`C(6tv*R^Tla*0%!C9R)b09D256tmRvnNJ;hs@7XIsa&wyuxyl98Ngdutu?H3 zKE=C$y+bGY>TUFXm1T|iEKa))Sz&9Zt-DwMcH))om{qCNIZQ9rxu9C1bC6k}bDmnF zbEKWEwpX!GYe%!f)|qIk(57yz(6MT?D$;w0sox;r==>uF&nY4K%MLp+#G&WYud5LJ ztPtYaGbl^0k_b0bjyj4bR<<-nya6xRu@QV7+#3G|xZiFX?nIDN*=e(Sv;bly!H0dt z(=E=(Q}1He)7Ot^`NB)Nsk6o2Meopa1n>YP1vg_0_(*bNr zx`?Bl{m_FoRgznS@va-)N&~L<0fUmlw|ku3Tr|LYEV$f;~S5UNWVTy7Bvu97ZPXJ z330<4hb*_beCe2Y8d1F@#V5S-q(f zW4MeWD(#=F5fenRtMxQzyVWvsjuWCI>c`&u0JmfW)&vh2wHF@5=|u#9l$j*8Tzwq* zA%BS|#Y3n3C_9OskEUn0mu#h%JEFx=g60~1m7orGV3OiZY8b$BNg`1$Mlx|A^6t0S)3Ny{~RHg*p~Eoz;Hh=i)*!=}k^dglChgB~ zTko+~mj$37^y>u>>vT>QF?q#2ve_??7e$w3^dBU61w2up>rCvTo@f(|JZ|~TDcrXm z?0TMloi|d4L3(7@?@;bLYPW`$&YjYpz`iwIVQiLm2R7?D4iQi2ohjpdm0clRp}Z0g zn3vvn!}z;W>me`YPmJ9K-r@KbIeTX7Cof%3q|FxJ1T`P15ewbLDtm0!jZe7L^WA1g z>piPZm*A{&?_pCbUsGt7-v&}mIT{VkIaMn@24+^Y7u;1zpSo5?EdYJ^goL#|Oi%xg z)ri4=&R3@R0Qh(R47%a1YLY7l=aPFPcbqHc2;st3S^j#WyG`FxykkP;$;H z4iinGB@%L8n1_vs1R*=fhCt2qMdnj$LchGJLwWwx=InOMjb00*Hf8niaogp{yU@CH zVN|>J9C*l`cZ!e`Q)t(C$X9}F{E2@O%6#Y>Bq8Ez)oWKckqL4}P#mzM^6?9M&rfb( z1Z@P}*twGFaMomhxRy7wbkQ}#CrPL;i=L1sNl6PFYo1W1|1DIhH(=2}&@Sy_**L*m zIN@|n>NNKW#u>VtO^+j-noM0E2~6wSLA_2!*I#`Kwt*U#;h-I-Z`szupTvLlQ`hVe zP5mQ(eg>K$6^k<)O1K`2`gQuH+WG6?1-$o z3?kj#DJ|XIE!`*`(%mhc0@B?eh)PO_gmi;+NC`-H!+)Il-;DUwdFq@Q@4W`+J1h3u zYp-+9UKjWoOU^I-XDFLcS5MR4DcUSJqX@qs54?)dBa>=UlD^+~dW=5LXy|eOdN-my z*n!2~U+7sxyCpY#Rk-R*8qML%P(*%1#C)NBrx1O9j%m%uk_fS&g9S(q34>Fj3K%zu zWSdLmJm`)VqY`u$|HYfuDF|Guu-H@${ ze0Nn3vTQK;G5S|qKft6Ts_iYc$?xuQ{NXEBVAz2TSgG(uF@`y7qxcl>Jn`|iuLOAh1q(Ts(mw$So${OQ~jCNgb!gJRE>S+d-#oT zO^c4F@WmY&r*;sRr1^X3y#s1x4vnEtGY=HGGQ?I{J>ZL0%qQ19@E`RI=o%lCPeOPh z)CFhsyk5qgAon7u^U)#l@x(X`2XVsAaDH;U;KXGswftZ)m`OoH6tpyEy(PopZS(jQ zLSrRzBSE0x@*+9LWrPJ7>mt5P`1v#QG+CSy1*iEe^R&bjZ}4;jwNMBXeozM(Cwq`{ z-a95?Ry*DOlh7T+@6=nvh142_)Xt!|y9QCHom!y$Wh}BdaH*V{(L1$Rqx4L<1N`Dr zB0U!ME2@d)*<(G(Is-$L!|!?tFE&iu1?|q^8$#c8vclbgZ*h-obGI+%?Xs+Ar?FaJ zNEjwGs=W{Ecbe`oqWEkF-Nn1~z>7?F(bEr|VwZ0&Zl`>aY#H@FX!1u4to-1VXrp?+ 
zGiX&`kh;4M5~|aedB(tV!zxK5<;58IQjrfOkuu1LxMZJHa|e@o72n;117%3MOD~q? zkD&xbv``;ubl#913%}a+`tlTNpbBbfc8;MW*dcZPvFT-c$7xTet0?F{J1te z>6Hw|IHn9;Us^&q=`rJ@y+L@G}#*db0Vcl#N6?(q! z=X?oz0z)p42I%xt*4>!vv)QKlz=k0e-V2tlS5@ zM%!m?ugm+8-icZo0!?2COyP`>&cu;?vNF8;&Sw1^p+Q+el~dGMc~S~g%645 zTsfCB&m^4!Ve5K^m1Bb{8$=pK5c7%_?`72|K*eo&j4)<-#4rwbv7I6GpmWJ^;O9$G zFyP%3y`Lg_8mL5(x9J&a)@G3OG?_dg6f?cU3nb_|hs4P;uOVuk$P32-Goy>U&VKyK zgKc7h_gaMnY4Q0lxx*q}@YuN?uCwhy+wt0vK|z~_%smp2I~=BwQNox(ofDk$<4IKC zqx9e}bbKOx1v|M-B+5lQm{8Sii2_lLAVgOi7R4h6actvs)(8s|d_yc6X9I_BOz3Yrxo*LYW@UE4pq%e~{`BpLU*@@oLAkTwW6^d5%H%qDw^$>? zf{hm)hR?0Rbdc)7LwU;4TBy|eRn5xiW1FSTtjK%gN8_6WX!af8;nIY&Ukksm6403i zbFGEG=Rhw#xRfa^_XPDwBvJCvF#Gu@N%_L(60?ewRu;&xFEFI(+m=xaaN$b1d)PeW z6gg1cIRm|06{@FHA0wucu(7FyBR@)c{$RF48#1q-U^V=JO&c;N0GcTmTGu9`pfUgh znln;>(KPk-ODVN6mB_w+sG`(`f*Gd+Fj8T@0P*s|hDnyv$3d(FQmuCmwLgrx3)43s z!i`-BHO>*Xe*%-3rb*0ReGA8n`52Wqz$DHqe3T``&^o{_i%N?1q({c2p)|65?@a7s+WR2TzLzR~H$VkVyX zl$KPFh*XX%b@H~!XRlu9sN8cLx0q%9v|V->KCKt6gMLpHwxt@%N?2&7^M2)(SBN_&w3! zM#P1)_#nEEmb&tnI@1G6lnD`gj!;ZUYXjFb&PA$~=>vL>ByG~PA*wc3Y>N57oz^H? 
z49*=UWE>EOFb|V<%lIA~cYp++#FD&HDgk$c+8yi|Qi+%ag6vypyFP1%YBx2LX-xw$pIUy_b6vp;YF?Sb{_!iW0BbLI&h1H9^Q&{mC<%5N zIf4MAlq3R+3uU{+sEvC4c*K?aANxk3EeBSH&(q?gM20bfQQguhbPyC_ZtCx?nokycs+3b!iFYm9ck2i)rjl1M9Q_p=b2lZw;9W?@R z#ed29Vkx(zP!Mn1S&5+v+p3($tBNCm7}vsKB}KtQra6J6I)u29Ktp}`sS%uLf)H7x za@WY~Xt{#r7)E6whcrH9N0c<`X}$A3W9F;25&BYWmnyACR9;G2PEpfq;fviLwbAW$B!O zqgwWUQh+L4a8Co4ag@zOf+ygkS;2#qB%LPYdfY=c#)rOWE+cSq2j=WTFg@dvq_6sl zRhDCv59Rx2aV#dKY^VEn#j8`q4rWVKhUGeuaai)<4<)98&24#yj$B^nf^$I9li;*n z>)U`q_Dd0Ys!Kt2dxm;vE=NWE69)+)dY(yC##xaOr-s|L?<&K*yyy?FUv%|97f zXJ0%x?>i0kf^E#358c_QT1YtGI(_8@d7Ww9?7iV_STNrOcSZO3(@DwQEA26*rbzga z9pih4uU<-cKam+GG-N)qd?_^F|KbqeBIz3H(d!*U4B_JJ zpHR^Uw~oymZ@p zHryvh&oz=kHY&owF(S{U&0e35ig_b&K;y|3T`6C=CTr0PyuK#ED{8vidX%7tHhX_a z$4~Nn1C74{y8N(8xAB^yU}!!+>Bv~U_y+v-;0;XG(3Sg<^Mc+LuZyO)j$73ohAGNw z7NIQWK(S|Ok&@4&=7kl=a*9;3OpD>Bj(sAxgZL%w!!iQwHs{Tz37PROf|~GP(>S&x zYO_YS1NU1~hUF+FbSbw!E_<`f(-c?EL#OQN`D$LVMMq=Z_icJ)F+)JKNp~>Dq(h59 z6;H<{fVo--zO6ewA8bDQ+zgz{F@&xpo|^_x*z|%rPIfYKSe{uqi{S~5!t6B2`e;Lj z;)$0S$T_F_IVRem2lV9m`~t@np?t14c;5Lq8qQ6O(Bcjy=`}(8-~ss+g?iCVO6@Vf zP3bi=IMa;}3$+4>C9Oqz*Slx5Sa!BzWLLbHvC zEBS9d%T9c<2lN&qB61?E&|$gv`X;WgqC^Ex1bvV$j940VKCVq!bJ;Tm-5Hya=h=Rd zH9@C+_jNnR_=Jses5!Hty4!s=b*aEpw#Y}Rshbd(oN2E8`S(>sh*r5CHEhx!#&^>n zwoN?KpwLj+ET%?U%8Ye;i&R$#_X*{h4U-LS?x6!@dkA+SxLC;+>VxO*dHWMVI58fZ zNbj~%A6bQBT~qVlpk5SfqcU!v%b?+mL}xvh$PF*&V5O<&;xVyBCJiDOFrx*s0zF}*O7y^&= za!Z&PLA@oM??5>5ZW<%nF?I(DR!7!jIN>Trw3w&JiI2nE2-;10ncF|uwyUR#t;RVt zCiV}owJ$*AK+!XV?lBqgXdA_LO74R=m;+4&J>-i{n>eL*xD_Awpi_(wl0SdT$7!i( z@}jfi1ay~S?}`)=v!X$@cB@&GQ}|Sa_meB!{HLwAMDhG8LPiCc^-mU85z>ryb#1yX zL+IThrL$FL4O`~RERn{qJ#AA@Hz!=uxHjN}+2>;4q^XdnZh6G=XpYL(X&Zv}gfz#_ zroH5vKI`!p2txFHidgUrQIMVQPR1VNAsb)7b-OV`+bQ1uQ~KODo&sDK{?ARx3TN{u z-`NhY%5Rb?4RcY(Y#k_=PjYhWn$>Aj>7QA92+MdqaPx%pT5?~ry6gdcZa}&u7?JJ? 
zo$8-{_tY~EpW#*O__!6OO%;hboRe8_9yKO30o4NBjJ zTU*%%t=UdzXitL{-PV@h-iBwM@yvy;3t`dB#zX0Bdw{q_luJ#UN%RmSTt7VZq(ISXr{^ z@M=vh9z#uo3oqS+NV-te+*+rr^C}&c0{@6hA=V0ow_ZZ&%=N0ORUhaoe)Lvr+1{PJ zRT9^llzSlWz1#row8qpdC$z)K^09?Rwmt^T7&u3_9`Bf58eU)Wn4;8xJ2ykhgI?MA zUWWw`)HqxH-0(oZh(a;WCX%Sc@+#giy7o_9TN!~poNL-bSM_Mp&aalm4%lK9@r2VxLH9Lo&T`yhfxxSf80?-NL%O5`0Y;ELFK`3hO*c$&$QILViEB#m@L zi*#g`bcB~-!gC(EGnWHjUZ_k43b!a|1oWX(zmxjlf-~uioWdk(OHTHn&nGL+QO+6# z{P;RB^^VeG@~2Z(Z&$Bl&}2cyygD)TNJ?pWg(_c1SU}It`fqPX8kD7>iS5sAC5!r0 z^5$Jbzqm+&%1p;Q7g|zlE8HZCpg?zs1m}}G5pJWpSGmIRPVdoj@5!j}Bz4<)V&3H+|; z-mKbf;&wx*6?uVY+tc=#tTybdHi+0O+7gEo_8}vhiVY|lj~6V&w^8v^4xd`a)KBzH zxNT;V6A({t!j-T3HoWS=n+!kS&`QSb5gAZS;1eI-3E{ja3j=B93lmg;vaRf#h?K6@ z30FLv))at$O1gmj4vGj7of5Td`7SZ>qv|CK6oca#OC%G_cOUl=3euaT7^N2q>+yg` z{|1naUoU%?wcfZGqveXUlGqN|dU)hU-w>TOlHhvH+T!)z%imd|LKsmDGR1*zU?8oC z^w%aGbYkqPFUjfUf7W{w5q&nwbZ$Q}KyggC;ET0sPBKZ4yqSPX*W~2ISoDF%;j3mcDo&Vs?aIbY$~_Hs6f3dRgKT z?g!;ZQt!w0nzB>aI*fQP#`XSF;ncnj6oyTzalE-hr&0VWOafTOZJA-boPbX@C2^=KR#wAPYr@X%kOSGQGSVEtxy4sBa${bf|J` zC}&M0W45N1_F7{!h1K*?8)B{e>B2qQWRIk5%@RxF*E?`r(+KyuFB2m*Km$23QF>#D z-}8XGB^~HJsR0p7j$`QOfzMA;k=IjgkB9u)ucIkbhjSe?U@@Mu4Dt7gw31~ zH+$+kt(;Td3}FI>3x{|QgfzZLegP%ach-a4PAL*j_DTU4c4a$H>g>{eRZ_sOKk`5q zb?3C=?}=(Sy$-f@YBBq`sU=<6>Enr8Sxq>)24c9+k;SI}N!CZV zM3Z#rnBMZt86MpDl+~7`j}X{pWW;O6&H1|~o^j4gDrEG|+ZW|D$>lXLOh$5P{jJ#AG>wuWNtl7C%=zTmpz#Ai64h5?#2A!^ zA5!nA5U?`pmc0vnQEcb zSir85FhAP!UtxCB4lFYGFvohRG**u}2`Kn1 zjK_ZRsB&RerSsnC+0o9}DfLe7sX*IXC)7hGd)(I?p@E-prHqiAD3ws_7A& z<$)@t<1@6t%Yt{;LXK+?i08a~^IL<$+plAqTH|AByF1Nmtb6Z!zs-CLSJ_b6EYrG5 z+*FJ02E8&|zA_Nil|hyx;VpaG(ZPp#f%3XpIg}B^U5`*Ot@!{vJhwRpHk2I^Zih(X z7$E>{r&QN9NKnq2@9+R2NN^6BVVE0*+!aAqbWR&yEgf5&aHl?4v*bUu4B3$-lf<%0IUj{R(HtP{ASlL$`|;iBl)3? zvCiPbcdpM#Q7r~xC^+c(k9{f$70K-!Y94K#sL--JT5EoLrMn5KV(JHHd=a@UNUlxQ zK-Q{K-LEF0H-H_H&nImGGf*$B|3EQpJ@qgi(EQ8?`4! 
z`c~{V_O)yCZtW|B1$!>4UIZ#Qu5?@k&5O>&Frhah_x60nKePnt&VUV}t8?0xw+6wx zr01@-?kvkq8@8Y@d|)amNPw$OU4zSRdfvo8IFBBRpoTJAfNg_5R1~5ueE2XsCD@AG z4S#4OWKQ5vESnveD9`M|>{pOFy%j*b3ON6;VKte-*^vEj5&pfjFxhsI6q2Yh_&CXNN0y>^y zc~zcGJg%dammtnVdWbx&3#}~uW+kUW-jw#$Fd|%LwPdBGRBd4$u8G1REI&xo4Lf4QIGU%!BaGGd$)!S9yCOe!w-Tjeg~^9O6VXJgonOYVcz!wRj#!qoD%C@qOXCA zU}ClJ5(lww8d?sfQ9bEWEYR%iWu*i2E2DOY5h>%RLydkYxn9ukc{N(gSX&83m^fz> z&QDLK%va+YP^jDCp&LJ5Ut?s~^G?Rn>Ae@28iceTfu6-1Lcw;sbRQO*nxkkQnPCzu z%2O0!KM4!^r1$7SI$w4lhdMU2F?%?M1g@k~#u$9cQmi(f`^%XM)DWzU!be=5)SmZZ zYHcfa)lH3qESW8(CDg}fm>BZXhO)8y5`!`7)8G=ja8MDjvq$LYDak$OHQeZhJbKyyTKt3y&p$$>oa38*YcBpuXNksR&YOM-oZCB89_>kwCP;D0| z-68~io`RMgNH7IM`qGwyP-~c{J|va30|os&49djNdL9H~QDxvpY!3HStMD!P%N8^L z(tva^H=GXvkyHnv&F~_MR9P|XN8x=i&`*+7M3=OU zk|gl~gU7JK9y=!=C5JUET*ylZ454^Fe%H)T>H=fjEoGyz)z1^z+%`n~hH2a*L37UF z&RmC;#}kC}+FgYZohv4rrIui}P(__mG%MsXdy3X@ zwyZ)QJ+X4i@&au*b;Ukro=Q}bIMJi`R0zBZJf`BgBHCP`?{QT|RTS|55nLJFV$Hkrc&jXumJTlR zO0?Jg>+}b3wwR!oy}M0=&I8aJ#=#BJ(%?PHxo>d6>c^QYdasXYiUqZCid*aROBM(} z@A0D}qB{8!=*UI{`=c3w_h)7VB?miG8zVzD7E)m&3yVMZFG!2|V?&?7dDC?YShPzJ z{`g_y(FJE%1qiS$`i_2 zFItq)&EP_mw#9rgyfnbcj^aku6m=v$$&Mts>_;mQP9hL>_^k;x8}teudU|#DQ95Xj z9OvSL`$T1R30r@dcx(^X6Vu=+UOn5Ymk>l!z-%}rE4R-8WAbVAwT%|UQa~)pgyWeB zsTkZG84f$mG_G_(q+h|o5E`LSIX?`W8LOTYee^vql3ph)4&k9!Dr)Z!&)Hn1*znf< zrwt1RsR^ibv=cV_G$PRGa|FfjLe?)Sl3Cd^B~n&AeCG_e9=P@C>7MUYf~C8HZkVUI zsqp~BtD*y}PJ_O;JY^78A0ES?gQXPY3!KzBN*+L%P3U(%-RW5=Gj9BJ7=X{DOy?DF z*kWkV2{sfvTbW572EO63dVUk5MrW01B_4WWTs|jzOu+BynVsp>rQP6?)P&W2sF1t< zpi2)D+mLFyxgtM{Gl#Q?^Lf%p>bT}$eYUI0LGnk(KGD8B#*t3}%{F2CSlcABy-Nft z@lCFZF%T>=`X9a!wzdu-`M!c(k5+?N844HhHXg zQ2HoETY`qvo8$<6?*NQLk|3fF-A4RPzS*6P>Zf^sY%#{xIE>2-Y=>hE^dJBJ78`=U z=A9CH_9nuAwQ7x1kdZ?ZMDpZ?n{MSGq9LSv9r^_F!&?wWcVKhOwrXgzMI2PQT;oa~ z4P$pUUf#RcFy2p?k;sPH2tP#LDZ+i%mg4E6byWL8TjgjV#hM?)LXT7ANe!b74-8{y z;G2(}1dog(mY_TCv<9mN!5uX;a$wjRmGOAdD;Hx~FrS(Z#lXF6l{Yu0X68m~EIWUu z4Q%Ax+92Ik^`7Y2xu1lcetVr*ZpM6Y)`6rSvB~qZo#m28%3N{9Ot@S_tX#O=+8Zmi 
z?;{u@LZA$w?(uZ!`$Gm}k>6N`8HDI@(?ZxjET|ZMNL^lBw9FDuLB5^fR{lP^l$u&4 z6byUbyxxVjJE!ioU(@4J1*uQ7nz4B_Hw7y~`0YErvzEC#8&q&lm5FQ-^1$m)xMdk?GwZ?Ei6`{;M+HM>dm@cFXHX$oVr zTF~=@HJ&(!VY}cto^!N3N?EMuhGXz7uG$dZYdVRwhokfoOlNr>Es@+0dq3k_47RD4A}ldy|tPBT&=7j1LY3k^lQ zGfbH*S(g!VF+@X*D%_dlbsYW=R>lSI5%bVqnAwk`)3wf>i}g9s!Hqo2FOH3xOWIFi z53D{8e_iAR1Fk30kwC|fDK@J#mS=+QG|sn=j?EOC)l*!pXFtgnk)oyNCL%Zbs&YBK zxU+mu#%5T-x5_M{Yin4&--t^8T6yyr zC3_V9vKLVEPYbM{-_Mo3lDK@*Sx_OaB8?iz;CBdO%qW_ePw7wNuv{=lgAwKaM@O@u zDr#F>z{8Pvok$|meq2+@>R7BY!>*oK1`RwMGO?{YM~$(SeJ9>O z9x+dlJHzN4 zr1HjXHk^IxyxR11u<79N352qX2SbN0)OzEBK^f2_S=9N#M7<<$kyzI1K)`JyeGmT; zM0@2n>0v~#`o$}euAY@*sAnbhEf#bsSi4;k%}I0d^`Q%=QQx|M7_$v%+mA5&ASLXd(Td5NYvcaj62 zr5Z%WKc=vB?oIA37wLNVuFq>|d1BXQye+z|7(Zpd(WVU-<8ZGizQCI)Oxv02G< zu`@3lXestXlv`)$=ros-<^|Ty)+o`X%_o9V1E))5o5Lq~3W$BUS(X{^V~7ibR+&_z zo3pmr!k&mf&AF#*^?p}!rce(-3rBHnd>84QWrl{N&7UTOujOqAfjfL!wWLo`7L=SY z^aZp+6X!uZb6SA|WY&!VTKr)BW>gL)f^zb!)f_pxgz-{DW%i_r&NAbhuFWPPq9~4$ zE3G%YOc@2HLeO-I?K*!VBB!sy{x@sXd;m+q0vOW z?{!~CALj!y1q^LSu9ouvSCfw1K&} zXMR%*?Z5yH?k(No=tG^NK!j83p6qZe_l{Id?>m+1! zf@)uc4JLSZETCA-5t7>R>al+s3y;tH$89FNX@=25>6n=Zd}d~*A#q>_gFS;vPx*}5 z(wPr)!gOdxKw7Dms)_N1)up72JbK`gvXo3_W}mk|8jS#Fq0Ky_^ffYLY@sM!=U>#TQkTEand|S3t{$_jv))Pz<$mR-t~K2ahUTVDN{V| zjd10KcgojOp={TUgxQu0ympHuCs%Qr>e~YR3&Mrts!K*?MBy2Gr}dp+d-hgmI=fvD zF0Bs}HqTV9aq0v8T+$5ceZ+Uk#a+O3jL(4v8Xdd19;i(#1UL967t<~fO&tfPV2G21 zljm|4XU1cfu;%rC&UtP} ztb_s|?JUZxP@K|`az2OyVgC>ePHwGct!Ur6$v3{B;3<+k_w*aYn{{2Du`g^Cbbf+l z$SZj16)cM}ELx%IznKE`4U7<-f#0GQK4q@kRIT|~u}$)D!^NvgcVqYi;exR^k4Qsy zg4h!N0YmM;y)jQVD)39T*od4o+$;)o7Qa4W!CVPUIl9U1>yq!rqZPRB0DTaq&V|AOuR)1RCo^OU?5@gt1w#jw!Z$MSZ@~3#i(MDFNm8x>ZNswbhW$+F*%<7%HT!u_3_fmLT%o>Y zbfO`B#{8@bNw{C=4mheP>NH$e^8MM&ekA%hZa6XFqr1ZS>r%G35w${9uH*OQxUy?z zHf-o~H%tp>97a2xVjYBD1;7zFPT)^3a+U-&zE&F)6tX*)o2W3#oh;{`Gp79NfuJ^t3OLjAEJ zUh7N5$ctTnIQo2Ew3V~1$a&a`8a;24&1#0Q;;|r3Z1}|ZF0P#pUwI5qmth1BxrLU! 
zSasRpm0U!*9+TDz!~ur8inaYup=GENT$2Rp)RPP2S>kZvEJCxavI`PqMA`^bd$8^fz<11WZsB1N0E$s1U4c~_aN{XB_tj) z7MXOQ^QzG>ROBbbsgPw+x$!aH_%_FBKCBMHGW$4M|2i>VFm<>p0!P+3EI_2Pz?io6 zL#=Cf36n9(Wkr%Y9WE~zm@&0qTDb7NyZAKrer6iWVj8rcU<-3si_FVujdis0u3dse z^X@4-vs6mtQiO7n3`VIsdi7EpRx=hGvXq_9q{3YdP27ACgfg<~w;htD;v%zq-sW>q*%_@kD~N%;|aZOnHh;48-QB_s9Z0(Xnc~4rZWq?*irZ4iS(Td8~7*x@cjRJweno^|G`2d0qj8tE$-ui;j7H~e)i z^jc}}QK5@fzt{bhvj!U~!PQKkx)bj&y{;dRJE__CdQ=%QO>t>8JtpjCE!xP}4aG=h zh@GpIY7w4`H-9JW0dB!Vil+;L4>uHK%yCd2%L_O8v${Yb)~(iVDmiS!ykIB3f*N42 zTdl9Oel|u7SGT7$O6cQ6bWQt$Ph{(4H+{m_nw!AP_hyB>bCn%Qy^0DZSWF6rrWrEe zVRF{k<2@*i7tJ`GqV@}m2*_6GUGk|4~;%do~bR1$UfX*HY z%SwJDCr|MK2e-Nx0eWw1H@q8&=rUJo5Xt+kuy1e6HSbVq2g}xcnY;RxCyIhZaerZ*fGtOq+ABn=KR-qp*z za)T7#qf>N=q_giMpMkU9z>x-d@VB?d96D>bkHd| z_I(Ks{IhTg57`TPCoV-fftXm==S|Tg^AcB(H=iUtP%rM?*O~XPlr>Q+$SvYF5yah6 zz_d)A@twJr*u0Xm^G6R`RwJ2A+4IT<;~%+B;C1risl0$gm{J1u85NV*Qd4L+K}dBa z$cu`7$`t^1OvGsR=JfLBkCp!vjgCYMQ2!D5V@qVvJ7^$iAW%?HAS+_VpZ7rq0ix$0 z2Q&!qk8T|@qRN7_lCom-*WI7z$=g4=8O}2=0{nAP;1}AhANySF{Od<$1Z5?~M3t23 zWW>Ha!!iJORJ}1h1LC&_um>#etuwR&zZ3pzzgIa zPE-hp!glLK;Gegb{m$mGf)3V}rUr^ekB#h%tPG5#P4(^c>|Fmde}BzzKzq^e05FRF zi!lwT?I#&b-eS#5tw_iZ4unYpC*9*k3UINv9i~DY$RcA@z>qcudTMF)70b! zpg4gohFNam{E&YZ_icd|-`d+>jo$^32m z37KMx&;ZP=|6-aF|2C85+sugilDukQfEAE^J?x-?Kl$Du_{wnq@tLpw_}j#}>PygP zfCs?<(dG6yU}pMR;|Kk;*c2uthEVPOX6SN6UXu--6N^=O7@&AaJwt_Mo2Q|9QyY+WQm! 
zZNWlp2L^Njp&|e-;0a$GB%(jf{5B$LI-LEp?lJ((ZSP(IZfN|-E0l~Z^=wS6?fz-@ zm+EJliv?;T_0vN)8Yu~mf_D?ee z_3a((^b8z;qMNy%zJ-yTzL}AM!?(e*n>qb{0MrPW_jvxM0-wxue;WM9Sp8nU$rBk15@r)q5hj4O-ptgq%A z2QPvE4ORow#_d@*(d^fl|BLO;uE#`q0H?VF*iYaKeS(0*+58&yN1o!%8s;Pb(Cz^= zU{e3$;J5o}+K)gGv$3Rv01P`I-sfI=e;+&%zlIaBb_C`XA=f|Om977L|F&|E)GD68 z1(4(b{dR2hj{F_^kEu4GY8C8&mqI{;fB-4(7ss2ZU!#gzIoP@0ZX)=#qSGB_w!49> zum%tBPsA#(QA&56zirH8U91On0FMCR-j)>P`LA<-L`;-(D6avc!7*UY+wMbK@M}b1 z+W#XUd|N_qJ*x!+a1fAvAo>8si7yV-m%qmQyFcyMLFe9L*6uz)cnfIrc628z`!&{| z$(fMrpYK18v&|JrnY#eJ7NGxev*a&XaF@c>UWiVlWtV}8R-Wg#s1Lk ze_qIdc>KN6%6C&`AmjUwSAAc(_PX_opBMFtKmZo}5AKh5@7iBuDj6AD8hvYS`Fxj+ z-!W$F8aO>EfV-{L$MwI6v#~IB_|Y6Xnj0ZF1;mH(|GJMg-tcQkWjj45BRl*5JS6vZ zkf@|uKea;w0XZl7TBRgyzX~)m5;FZxLTy^gnye34Py&d#x8p6?`(MX>KL&ci73^;S z<|_f>yZ9}Sc|Z1Ryl=FO{#vJRp2{kIz8Ql8KyOcDtW&=Mwf}yucLEXP8Uj>l?1O+x z(Je8DW`BwJ?b$11aN$!9;GGjdCPIG;(+uQa|1{=5kLxcU*Zf**j+)NURDh`_0L?Mp zJ~`1ppZu@yT7PfhsATCB)CHWr^6uBg1Ic?obNas@Ec#ji5P6!u1i-gd0LtzAI1a!P z=byCmw*a>v2KbsH_r4p=9H4xb<#q;POZhXDZ#{VMHB0sy^hgd01SAYtj^4Jn1ocm_ zzU@phB05p)fSp4CVI*(a`;h6MPyVC6wWL21XSQ&#O#I@TAZnNfZg1?_tYb9C;p8<*< z1{8gJN)ZB*``?NvKT~3`dmwLc3S34na2cw%9InOSU%rg*E`v*lj@Le`q93>h;0E~@ z2cOZud=1}U7gvks({2OkB;X=$&#m>3|CNjQ@!Wdt>Lk4Z;9~&%?RZCJ@-N}PtN$nI z_Rlu~b{xRIJ+nrc{!8rd>%DkK9Wnp_8v?=pc1m|<_M7nU7AF4q?s11x^j&}}0dQ|C z{Mh1mx&Kt5=pdF;CxZY1Awu}CY{Kd{+5b?gXdj3^^#w{5KEz*Z8{P4@$v>!BFvsFt z0)7k<1jP5-u^ln+U*LW}%)XGQqBsIf{P_zDw{zBApuG57VfKHhVk|4Ot1<#28vtvT z+q0=b*uO#lK{?~gy`qG^2&o_-h>^dC{`OMz%biGWuiRU@euDQ81&%Lwl;FK3)#$)a zQ2s6ye7_X&<(374w@`M#oc%w0&flmX*Cf7tcK&t};xYP5xc{X%@#T| ex + set_state(false, nil, ex) + end + elsif incomplete? + raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay' + end + end + if @do_nothing_on_deref + @value + else + apply_deref_options(@value) + end + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. 
If the delayed operation + # raised an exception, this method will raise that exception (even when) + # the operation has already been executed). + # + # @param [Numeric] timeout the maximum number of seconds to wait + # @return [Object] the current value of the object + # @raise [Exception] when `#rejected?` raises `#reason` + # + # @!macro delay_note_regarding_blocking + def value!(timeout = nil) + if @executor + super + else + result = value + raise @reason if @reason + result + end + end + + # Return the value this object represents after applying the options + # specified by the `#set_deref_options` method. + # + # @param [Integer] timeout (nil) the maximum number of seconds to wait for + # the value to be computed. When `nil` the caller will block indefinitely. + # + # @return [Object] self + # + # @!macro delay_note_regarding_blocking + def wait(timeout = nil) + if @executor + execute_task_once + super(timeout) + else + value + end + self + end + + # Reconfigures the block returning the value if still `#incomplete?` + # + # @yield the delayed operation to perform + # @return [true, false] if success + def reconfigure(&block) + synchronize do + raise ArgumentError.new('no block given') unless block_given? 
+ unless @evaluation_started + @task = block + true + else + false + end + end + end + + protected + + def ns_initialize(opts, &block) + init_obligation + set_deref_options(opts) + @executor = opts[:executor] + + @task = block + @state = :pending + @evaluation_started = false + end + + private + + # @!visibility private + def execute_task_once # :nodoc: + # this function has been optimized for performance and + # should not be modified without running new benchmarks + execute = task = nil + synchronize do + execute = @evaluation_started = true unless @evaluation_started + task = @task + end + + if execute + executor = Options.executor_from_options(executor: @executor) + executor.post do + begin + result = task.call + success = true + rescue => ex + reason = ex + end + synchronize do + set_state(success, result, reason) + event.set + end + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/errors.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/errors.rb new file mode 100644 index 0000000000..b69fec01f2 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/errors.rb @@ -0,0 +1,69 @@ +module Concurrent + + Error = Class.new(StandardError) + + # Raised when errors occur during configuration. + ConfigurationError = Class.new(Error) + + # Raised when an asynchronous operation is cancelled before execution. + CancelledOperationError = Class.new(Error) + + # Raised when a lifecycle method (such as `stop`) is called in an improper + # sequence or when the object is in an inappropriate state. + LifecycleError = Class.new(Error) + + # Raised when an attempt is made to violate an immutability guarantee. 
+ ImmutabilityError = Class.new(Error) + + # Raised when an operation is attempted which is not legal given the + # receiver's current state + IllegalOperationError = Class.new(Error) + + # Raised when an object's methods are called when it has not been + # properly initialized. + InitializationError = Class.new(Error) + + # Raised when an object with a start/stop lifecycle has been started an + # excessive number of times. Often used in conjunction with a restart + # policy or strategy. + MaxRestartFrequencyError = Class.new(Error) + + # Raised when an attempt is made to modify an immutable object + # (such as an `IVar`) after its final state has been set. + class MultipleAssignmentError < Error + attr_reader :inspection_data + + def initialize(message = nil, inspection_data = nil) + @inspection_data = inspection_data + super message + end + + def inspect + format '%s %s>', super[0..-2], @inspection_data.inspect + end + end + + # Raised by an `Executor` when it is unable to process a given task, + # possibly because of a reject policy or other internal error. + RejectedExecutionError = Class.new(Error) + + # Raised when any finite resource, such as a lock counter, exceeds its + # maximum limit/threshold. + ResourceLimitError = Class.new(Error) + + # Raised when an operation times out. + TimeoutError = Class.new(Error) + + # Aggregates multiple exceptions. 
+ class MultipleErrors < Error + attr_reader :errors + + def initialize(errors, message = "#{errors.size} errors") + @errors = errors + super [*message, + *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) + ].join("\n") + end + end + +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/exchanger.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/exchanger.rb new file mode 100644 index 0000000000..5a99550b33 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/exchanger.rb @@ -0,0 +1,352 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/maybe' +require 'concurrent/atomic/atomic_reference' +require 'concurrent/atomic/count_down_latch' +require 'concurrent/utility/engine' +require 'concurrent/utility/monotonic_time' + +module Concurrent + + # @!macro exchanger + # + # A synchronization point at which threads can pair and swap elements within + # pairs. Each thread presents some object on entry to the exchange method, + # matches with a partner thread, and receives its partner's object on return. + # + # @!macro thread_safe_variable_comparison + # + # This implementation is very simple, using only a single slot for each + # exchanger (unlike more advanced implementations which use an "arena"). + # This approach will work perfectly fine when there are only a few threads + # accessing a single `Exchanger`. Beyond a handful of threads the performance + # will degrade rapidly due to contention on the single slot, but the algorithm + # will remain correct. 
+ # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # threads = [ + # Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar" + # Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo" + # ] + # threads.each {|t| t.join(2) } + + # @!visibility private + class AbstractExchanger < Synchronization::Object + + # @!visibility private + CANCEL = ::Object.new + private_constant :CANCEL + + def initialize + super + end + + # @!macro exchanger_method_do_exchange + # + # Waits for another thread to arrive at this exchange point (unless the + # current thread is interrupted), and then transfers the given object to + # it, receiving its object in return. The timeout value indicates the + # approximate number of seconds the method should block while waiting + # for the exchange. When the timeout value is `nil` the method will + # block indefinitely. + # + # @param [Object] value the value to exchange with another thread + # @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely + # + # @!macro exchanger_method_exchange + # + # In some edge cases when a `timeout` is given a return value of `nil` may be + # ambiguous. Specifically, if `nil` is a valid value in the exchange it will + # be impossible to tell whether `nil` is the actual return value or if it + # signifies timeout. When `nil` is a valid value in the exchange consider + # using {#exchange!} or {#try_exchange} instead. + # + # @return [Object] the value exchanged by the other thread or `nil` on timeout + def exchange(value, timeout = nil) + (value = do_exchange(value, timeout)) == CANCEL ? nil : value + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + # + # On timeout a {Concurrent::TimeoutError} exception will be raised. 
+ # + # @return [Object] the value exchanged by the other thread + # @raise [Concurrent::TimeoutError] on timeout + def exchange!(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + raise Concurrent::TimeoutError + else + value + end + end + + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + # + # The return value will be a {Concurrent::Maybe} set to `Just` on success or + # `Nothing` on timeout. + # + # @return [Concurrent::Maybe] on success a `Just` maybe will be returned with + # the item exchanged by the other thread as `#value`; on timeout a + # `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason` + # + # @example + # + # exchanger = Concurrent::Exchanger.new + # + # result = exchanger.exchange(:foo, 0.5) + # + # if result.just? + # puts result.value #=> :bar + # else + # puts 'timeout' + # end + def try_exchange(value, timeout = nil) + if (value = do_exchange(value, timeout)) == CANCEL + Concurrent::Maybe.nothing(Concurrent::TimeoutError) + else + Concurrent::Maybe.just(value) + end + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + raise NotImplementedError + end + end + + # @!macro internal_implementation_note + # @!visibility private + class RubyExchanger < AbstractExchanger + # A simplified version of java.util.concurrent.Exchanger written by + # Doug Lea, Bill Scherer, and Michael Scott with assistance from members + # of JCP JSR-166 Expert Group and released to the public domain. It does + # not include the arena or the multi-processor spin loops. + # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java + + safe_initialization! + + class Node < Concurrent::Synchronization::Object + attr_atomic :value + safe_initialization! 
+ + def initialize(item) + super() + @Item = item + @Latch = Concurrent::CountDownLatch.new + self.value = nil + end + + def latch + @Latch + end + + def item + @Item + end + end + private_constant :Node + + def initialize + super + end + + private + + attr_atomic(:slot) + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + + # ALGORITHM + # + # From the original Java version: + # + # > The basic idea is to maintain a "slot", which is a reference to + # > a Node containing both an Item to offer and a "hole" waiting to + # > get filled in. If an incoming "occupying" thread sees that the + # > slot is null, it CAS'es (compareAndSets) a Node there and waits + # > for another to invoke exchange. That second "fulfilling" thread + # > sees that the slot is non-null, and so CASes it back to null, + # > also exchanging items by CASing the hole, plus waking up the + # > occupying thread if it is blocked. In each case CAS'es may + # > fail because a slot at first appears non-null but is null upon + # > CAS, or vice-versa. So threads may need to retry these + # > actions. + # + # This version: + # + # An exchange occurs between an "occupier" thread and a "fulfiller" thread. + # The "slot" is used to setup this interaction. The first thread in the + # exchange puts itself into the slot (occupies) and waits for a fulfiller. + # The second thread removes the occupier from the slot and attempts to + # perform the exchange. Removing the occupier also frees the slot for + # another occupier/fulfiller pair. + # + # Because the occupier and the fulfiller are operating independently and + # because there may be contention with other threads, any failed operation + # indicates contention. Both the occupier and the fulfiller operate within + # spin loops. Any failed actions along the happy path will cause the thread + # to repeat the loop and try again. 
+ # + # When a timeout value is given the thread must be cognizant of time spent + # in the spin loop. The remaining time is checked every loop. When the time + # runs out the thread will exit. + # + # A "node" is the data structure used to perform the exchange. Only the + # occupier's node is necessary. It's the node used for the exchange. + # Each node has an "item," a "hole" (self), and a "latch." The item is the + # node's initial value. It never changes. It's what the fulfiller returns on + # success. The occupier's hole is where the fulfiller put its item. It's the + # item that the occupier returns on success. The latch is used for synchronization. + # Because a thread may act as either an occupier or fulfiller (or possibly + # both in periods of high contention) every thread creates a node when + # the exchange method is first called. + # + # The following steps occur within the spin loop. If any actions fail + # the thread will loop and try again, so long as there is time remaining. + # If time runs out the thread will return CANCEL. 
+ # + # Check the slot for an occupier: + # + # * If the slot is empty try to occupy + # * If the slot is full try to fulfill + # + # Attempt to occupy: + # + # * Attempt to CAS myself into the slot + # * Go to sleep and wait to be woken by a fulfiller + # * If the sleep is successful then the fulfiller completed its happy path + # - Return the value from my hole (the value given by the fulfiller) + # * When the sleep fails (time ran out) attempt to cancel the operation + # - Attempt to CAS myself out of the hole + # - If successful there is no contention + # - Return CANCEL + # - On failure, I am competing with a fulfiller + # - Attempt to CAS my hole to CANCEL + # - On success + # - Let the fulfiller deal with my cancel + # - Return CANCEL + # - On failure the fulfiller has completed its happy path + # - Return th value from my hole (the fulfiller's value) + # + # Attempt to fulfill: + # + # * Attempt to CAS the occupier out of the slot + # - On failure loop again + # * Attempt to CAS my item into the occupier's hole + # - On failure the occupier is trying to cancel + # - Loop again + # - On success we are on the happy path + # - Wake the sleeping occupier + # - Return the occupier's item + + value = NULL if value.nil? # The sentinel allows nil to be a valid value + me = Node.new(value) # create my node in case I need to occupy + end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up + + result = loop do + other = slot + if other && compare_and_set_slot(other, nil) + # try to fulfill + if other.compare_and_set_value(nil, value) + # happy path + other.latch.count_down + break other.item + end + elsif other.nil? 
&& compare_and_set_slot(nil, me) + # try to occupy + timeout = end_at - Concurrent.monotonic_time if timeout + if me.latch.wait(timeout) + # happy path + break me.value + else + # attempt to remove myself from the slot + if compare_and_set_slot(me, nil) + break CANCEL + elsif !me.compare_and_set_value(nil, CANCEL) + # I've failed to block the fulfiller + break me.value + end + end + end + break CANCEL if timeout && Concurrent.monotonic_time >= end_at + end + + result == NULL ? nil : result + end + end + + if Concurrent.on_jruby? + + # @!macro internal_implementation_note + # @!visibility private + class JavaExchanger < AbstractExchanger + + def initialize + @exchanger = java.util.concurrent.Exchanger.new + end + + private + + # @!macro exchanger_method_do_exchange + # + # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout + def do_exchange(value, timeout) + result = nil + if timeout.nil? + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value) + end + else + Synchronization::JRuby.sleep_interruptibly do + result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + result + rescue java.util.concurrent.TimeoutException + CANCEL + end + end + end + + # @!visibility private + # @!macro internal_implementation_note + ExchangerImplementation = case + when Concurrent.on_jruby? 
+ JavaExchanger + else + RubyExchanger + end + private_constant :ExchangerImplementation + + # @!macro exchanger + class Exchanger < ExchangerImplementation + + # @!method initialize + # Creates exchanger instance + + # @!method exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange + + # @!method exchange!(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_exchange_bang + + # @!method try_exchange(value, timeout = nil) + # @!macro exchanger_method_do_exchange + # @!macro exchanger_method_try_exchange + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/abstract_executor_service.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/abstract_executor_service.rb new file mode 100644 index 0000000000..80ff953adb --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/abstract_executor_service.rb @@ -0,0 +1,134 @@ +require 'concurrent/errors' +require 'concurrent/executor/executor_service' +require 'concurrent/synchronization' +require 'concurrent/utility/at_exit' + +module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class AbstractExecutorService < Synchronization::LockableObject + include ExecutorService + + # The set of possible fallback policies that may be set at thread pool creation. + FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze + + # @!macro executor_service_attr_reader_fallback_policy + attr_reader :fallback_policy + + # Create a new thread pool. 
+ def initialize(*args, &block) + super(&nil) + synchronize { ns_initialize(*args, &block) } + end + + # @!macro executor_service_method_shutdown + def shutdown + raise NotImplementedError + end + + # @!macro executor_service_method_kill + def kill + raise NotImplementedError + end + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + raise NotImplementedError + end + + # @!macro executor_service_method_running_question + def running? + synchronize { ns_running? } + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + synchronize { ns_shuttingdown? } + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + synchronize { ns_shutdown? } + end + + # @!macro executor_service_method_auto_terminate_question + def auto_terminate? + synchronize { ns_auto_terminate? } + end + + # @!macro executor_service_method_auto_terminate_setter + def auto_terminate=(value) + synchronize { self.ns_auto_terminate = value } + end + + private + + # Handler which executes the `fallback_policy` once the queue size + # reaches `max_queue`. + # + # @param [Array] args the arguments to the task which is being handled. + # + # @!visibility private + def handle_fallback(*args) + case fallback_policy + when :abort + raise RejectedExecutionError + when :discard + false + when :caller_runs + begin + yield(*args) + rescue => ex + # let it fail + log DEBUG, ex + end + true + else + fail "Unknown fallback policy #{fallback_policy}" + end + end + + def ns_execute(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_ns_shutdown_execution + # + # Callback method called when an orderly shutdown has completed. + # The default behavior is to signal all waiting threads. + def ns_shutdown_execution + # do nothing + end + + # @!macro executor_service_method_ns_kill_execution + # + # Callback method called when the executor has been killed. 
+ # The default behavior is to do nothing. + def ns_kill_execution + # do nothing + end + + def ns_auto_terminate? + !!@auto_terminate + end + + def ns_auto_terminate=(value) + case value + when true + AtExit.add(self) { terminate_at_exit } + @auto_terminate = true + when false + AtExit.delete(self) + @auto_terminate = false + else + raise ArgumentError + end + end + + def terminate_at_exit + kill # TODO be gentle first + wait_for_termination(10) + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/cached_thread_pool.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/cached_thread_pool.rb new file mode 100644 index 0000000000..1c7c18da65 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/cached_thread_pool.rb @@ -0,0 +1,62 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # A thread pool that dynamically grows and shrinks to fit the current workload. + # New threads are created as needed, existing threads are reused, and threads + # that remain idle for too long are killed and removed from the pool. These + # pools are particularly suited to applications that perform a high volume of + # short-lived tasks. + # + # On creation a `CachedThreadPool` has zero running threads. New threads are + # created on the pool as new operations are `#post`. The size of the pool + # will grow until `#max_length` threads are in the pool or until the number + # of threads exceeds the number of running and pending operations. When a new + # operation is post to the pool the first available idle thread will be tasked + # with the new operation. + # + # Should a thread crash for any reason the thread will immediately be removed + # from the pool. 
Similarly, threads which remain idle for an extended period + # of time will be killed and reclaimed. Thus these thread pools are very + # efficient at reclaiming unused resources. + # + # The API and behavior of this class are based on Java's `CachedThreadPool` + # + # @!macro thread_pool_options + class CachedThreadPool < ThreadPoolExecutor + + # @!macro cached_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options defining pool behavior. + # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool-- + def initialize(opts = {}) + defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: 0, + max_threads: DEFAULT_MAX_POOL_SIZE, + max_queue: DEFAULT_MAX_QUEUE_SIZE } + super(defaults.merge(opts).merge(overrides)) + end + + private + + # @!macro cached_thread_pool_method_initialize + # @!visibility private + def ns_initialize(opts) + super(opts) + if Concurrent.on_jruby? 
+ @max_queue = 0 + @executor = java.util.concurrent.Executors.newCachedThreadPool + @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new) + @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS) + self.auto_terminate = opts.fetch(:auto_terminate, true) + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/executor_service.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/executor_service.rb new file mode 100644 index 0000000000..0fcbeeeb20 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/executor_service.rb @@ -0,0 +1,185 @@ +require 'concurrent/concern/logging' + +module Concurrent + + ################################################################### + + # @!macro executor_service_method_post + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Array] args zero or more arguments to be passed to the task + # + # @yield the asynchronous task to perform + # + # @return [Boolean] `true` if the task is queued, `false` if the executor + # is not running + # + # @raise [ArgumentError] if no task is given + + # @!macro executor_service_method_left_shift + # + # Submit a task to the executor for asynchronous processing. + # + # @param [Proc] task the asynchronous task to perform + # + # @return [self] returns itself + + # @!macro executor_service_method_can_overflow_question + # + # Does the task queue have a maximum size? + # + # @return [Boolean] True if the task queue has a maximum size else false. + + # @!macro executor_service_method_serialized_question + # + # Does this executor guarantee serialization of its operations? 
+ # + # @return [Boolean] True if the executor guarantees that all operations + # will be post in the order they are received and no two operations may + # occur simultaneously. Else false. + + ################################################################### + + # @!macro executor_service_public_api + # + # @!method post(*args, &task) + # @!macro executor_service_method_post + # + # @!method <<(task) + # @!macro executor_service_method_left_shift + # + # @!method can_overflow? + # @!macro executor_service_method_can_overflow_question + # + # @!method serialized? + # @!macro executor_service_method_serialized_question + + ################################################################### + + # @!macro executor_service_attr_reader_fallback_policy + # @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`. + + # @!macro executor_service_method_shutdown + # + # Begin an orderly shutdown. Tasks already in the queue will be executed, + # but no new tasks will be accepted. Has no additional effect if the + # thread pool is not running. + + # @!macro executor_service_method_kill + # + # Begin an immediate shutdown. In-progress tasks will be allowed to + # complete but enqueued tasks will be dismissed and no new tasks + # will be accepted. Has no additional effect if the thread pool is + # not running. + + # @!macro executor_service_method_wait_for_termination + # + # Block until executor shutdown is complete or until `timeout` seconds have + # passed. + # + # @note Does not initiate shutdown or termination. Either `shutdown` or `kill` + # must be called before this method (or on another thread). + # + # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete + # + # @return [Boolean] `true` if shutdown complete or false on `timeout` + + # @!macro executor_service_method_running_question + # + # Is the executor running? 
+ # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + + # @!macro executor_service_method_shuttingdown_question + # + # Is the executor shuttingdown? + # + # @return [Boolean] `true` when not running and not shutdown, else `false` + + # @!macro executor_service_method_shutdown_question + # + # Is the executor shutdown? + # + # @return [Boolean] `true` when shutdown, `false` when shutting down or running + + # @!macro executor_service_method_auto_terminate_question + # + # Is the executor auto-terminate when the application exits? + # + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + # @!macro executor_service_method_auto_terminate_setter + # + # Set the auto-terminate behavior for this executor. + # + # @param [Boolean] value The new auto-terminate value to set for this executor. + # + # @return [Boolean] `true` when auto-termination is enabled else `false`. + + ################################################################### + + # @!macro abstract_executor_service_public_api + # + # @!macro executor_service_public_api + # + # @!attribute [r] fallback_policy + # @!macro executor_service_attr_reader_fallback_policy + # + # @!method shutdown + # @!macro executor_service_method_shutdown + # + # @!method kill + # @!macro executor_service_method_kill + # + # @!method wait_for_termination(timeout = nil) + # @!macro executor_service_method_wait_for_termination + # + # @!method running? + # @!macro executor_service_method_running_question + # + # @!method shuttingdown? + # @!macro executor_service_method_shuttingdown_question + # + # @!method shutdown? + # @!macro executor_service_method_shutdown_question + # + # @!method auto_terminate? 
+ # @!macro executor_service_method_auto_terminate_question + # + # @!method auto_terminate=(value) + # @!macro executor_service_method_auto_terminate_setter + + ################################################################### + + # @!macro executor_service_public_api + # @!visibility private + module ExecutorService + include Concern::Logging + + # @!macro executor_service_method_post + def post(*args, &task) + raise NotImplementedError + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_can_overflow_question + # + # @note Always returns `false` + def can_overflow? + false + end + + # @!macro executor_service_method_serialized_question + # + # @note Always returns `false` + def serialized? + false + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/fixed_thread_pool.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/fixed_thread_pool.rb new file mode 100644 index 0000000000..c9e03dade7 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/fixed_thread_pool.rb @@ -0,0 +1,206 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/thread_pool_executor' + +module Concurrent + + # @!macro thread_pool_executor_constant_default_max_pool_size + # Default maximum number of threads that will be created in the pool. + + # @!macro thread_pool_executor_constant_default_min_pool_size + # Default minimum number of threads that will be retained in the pool. + + # @!macro thread_pool_executor_constant_default_max_queue_size + # Default maximum number of tasks that may be added to the task queue. + + # @!macro thread_pool_executor_constant_default_thread_timeout + # Default maximum number of seconds a thread in the pool may remain idle + # before being reclaimed. 
+ + # @!macro thread_pool_executor_attr_reader_max_length + # The maximum number of threads that may be created in the pool. + # @return [Integer] The maximum number of threads that may be created in the pool. + + # @!macro thread_pool_executor_attr_reader_min_length + # The minimum number of threads that may be retained in the pool. + # @return [Integer] The minimum number of threads that may be retained in the pool. + + # @!macro thread_pool_executor_attr_reader_largest_length + # The largest number of threads that have been created in the pool since construction. + # @return [Integer] The largest number of threads that have been created in the pool since construction. + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # The number of tasks that have been scheduled for execution on the pool since construction. + # @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction. + + # @!macro thread_pool_executor_attr_reader_completed_task_count + # The number of tasks that have been completed by the pool since construction. + # @return [Integer] The number of tasks that have been completed by the pool since construction. + + # @!macro thread_pool_executor_attr_reader_idletime + # The number of seconds that a thread may be idle before being reclaimed. + # @return [Integer] The number of seconds that a thread may be idle before being reclaimed. + + # @!macro thread_pool_executor_attr_reader_max_queue + # The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. + # + # @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time. + # When the queue size reaches `max_queue` subsequent tasks will be rejected in + # accordance with the configured `fallback_policy`. 
+ + # @!macro thread_pool_executor_attr_reader_length + # The number of threads currently in the pool. + # @return [Integer] The number of threads currently in the pool. + + # @!macro thread_pool_executor_attr_reader_queue_length + # The number of tasks in the queue awaiting execution. + # @return [Integer] The number of tasks in the queue awaiting execution. + + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + # + # @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting + # new tasks. A value of -1 indicates that the queue may grow without bound. + + + + + + # @!macro thread_pool_executor_public_api + # + # @!macro abstract_executor_service_public_api + # + # @!attribute [r] max_length + # @!macro thread_pool_executor_attr_reader_max_length + # + # @!attribute [r] min_length + # @!macro thread_pool_executor_attr_reader_min_length + # + # @!attribute [r] largest_length + # @!macro thread_pool_executor_attr_reader_largest_length + # + # @!attribute [r] scheduled_task_count + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + # + # @!attribute [r] completed_task_count + # @!macro thread_pool_executor_attr_reader_completed_task_count + # + # @!attribute [r] idletime + # @!macro thread_pool_executor_attr_reader_idletime + # + # @!attribute [r] max_queue + # @!macro thread_pool_executor_attr_reader_max_queue + # + # @!attribute [r] length + # @!macro thread_pool_executor_attr_reader_length + # + # @!attribute [r] queue_length + # @!macro thread_pool_executor_attr_reader_queue_length + # + # @!attribute [r] remaining_capacity + # @!macro thread_pool_executor_attr_reader_remaining_capacity + # + # @!method can_overflow? 
+ # @!macro executor_service_method_can_overflow_question + + + + + # @!macro thread_pool_options + # + # **Thread Pool Options** + # + # Thread pools support several configuration options: + # + # * `idletime`: The number of seconds that a thread may be idle before being reclaimed. + # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at + # any one time. When the queue size reaches `max_queue` and no new threads can be created, + # subsequent tasks will be rejected in accordance with the configured `fallback_policy`. + # * `auto_terminate`: When true (default) an `at_exit` handler will be registered which + # will stop the thread pool when the application exits. See below for more information + # on shutting down thread pools. + # * `fallback_policy`: The policy defining how rejected tasks are handled. + # + # Three fallback policies are supported: + # + # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task. + # * `:discard`: Discard the task and return false. + # * `:caller_runs`: Execute the task on the calling thread. + # + # **Shutting Down Thread Pools** + # + # Killing a thread pool while tasks are still being processed, either by calling + # the `#kill` method or at application exit, will have unpredictable results. There + # is no way for the thread pool to know what resources are being used by the + # in-progress tasks. When those tasks are killed the impact on those resources + # cannot be predicted. 
The *best* practice is to explicitly shutdown all thread + # pools using the provided methods: + # + # * Call `#shutdown` to initiate an orderly termination of all in-progress tasks + # * Call `#wait_for_termination` with an appropriate timeout interval an allow + # the orderly shutdown to complete + # * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time + # + # On some runtime platforms (most notably the JVM) the application will not + # exit until all thread pools have been shutdown. To prevent applications from + # "hanging" on exit all thread pools include an `at_exit` handler that will + # stop the thread pool when the application exits. This handler uses a brute + # force method to stop the pool and makes no guarantees regarding resources being + # used by any tasks still running. Registration of this `at_exit` handler can be + # prevented by setting the thread pool's constructor `:auto_terminate` option to + # `false` when the thread pool is created. All thread pools support this option. + # + # ```ruby + # pool1 = Concurrent::FixedThreadPool.new(5) # an `at_exit` handler will be registered + # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # prevent `at_exit` handler registration + # ``` + # + # @note Failure to properly shutdown a thread pool can lead to unpredictable results. + # Please read *Shutting Down Thread Pools* for more information. + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface + # @see http://ruby-doc.org//core-2.2.0/Kernel.html#method-i-at_exit Kernel#at_exit + + + + + + # @!macro fixed_thread_pool + # + # A thread pool that reuses a fixed number of threads operating off an unbounded queue. 
+ # At any point, at most `num_threads` will be active processing tasks. When all threads are busy new + # tasks `#post` to the thread pool are enqueued until a thread becomes available. + # Should a thread crash for any reason the thread will immediately be removed + # from the pool and replaced. + # + # The API and behavior of this class are based on Java's `FixedThreadPool` + # + # @!macro thread_pool_options + class FixedThreadPool < ThreadPoolExecutor + + # @!macro fixed_thread_pool_method_initialize + # + # Create a new thread pool. + # + # @param [Integer] num_threads the number of threads to allocate + # @param [Hash] opts the options defining pool behavior. + # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy + # + # @raise [ArgumentError] if `num_threads` is less than or equal to zero + # @raise [ArgumentError] if `fallback_policy` is not a known policy + # + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int- + def initialize(num_threads, opts = {}) + raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1 + defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE, + idletime: DEFAULT_THREAD_IDLETIMEOUT } + overrides = { min_threads: num_threads, + max_threads: num_threads } + super(defaults.merge(opts).merge(overrides)) + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/immediate_executor.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/immediate_executor.rb new file mode 100644 index 0000000000..282df7a059 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/immediate_executor.rb @@ -0,0 +1,66 @@ +require 'concurrent/atomic/event' +require 'concurrent/executor/abstract_executor_service' +require 
'concurrent/executor/serial_executor_service' + +module Concurrent + + # An executor service which runs all operations on the current thread, + # blocking as necessary. Operations are performed in the order they are + # received and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used + # it immediately runs every `#post` operation on the current thread, blocking + # that thread until the operation is complete. This can be very beneficial + # during testing because it makes all operations deterministic. + # + # @note Intended for use primarily in testing and debugging. + class ImmediateExecutor < AbstractExecutorService + include SerialExecutorService + + # Creates a new executor + def initialize + @stopped = Concurrent::Event.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return false unless running? + task.call(*args) + true + end + + # @!macro executor_service_method_left_shift + def <<(task) + post(&task) + self + end + + # @!macro executor_service_method_running_question + def running? + ! shutdown? + end + + # @!macro executor_service_method_shuttingdown_question + def shuttingdown? + false + end + + # @!macro executor_service_method_shutdown_question + def shutdown? + @stopped.set? 
+ end + + # @!macro executor_service_method_shutdown + def shutdown + @stopped.set + true + end + alias_method :kill, :shutdown + + # @!macro executor_service_method_wait_for_termination + def wait_for_termination(timeout = nil) + @stopped.wait(timeout) + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/indirect_immediate_executor.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/indirect_immediate_executor.rb new file mode 100644 index 0000000000..4f9769fa3f --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/indirect_immediate_executor.rb @@ -0,0 +1,44 @@ +require 'concurrent/executor/immediate_executor' +require 'concurrent/executor/simple_executor_service' + +module Concurrent + # An executor service which runs all operations on a new thread, blocking + # until it completes. Operations are performed in the order they are received + # and no two operations can be performed simultaneously. + # + # This executor service exists mainly for testing an debugging. When used it + # immediately runs every `#post` operation on a new thread, blocking the + # current thread until the operation is complete. This is similar to how the + # ImmediateExecutor works, but the operation has the full stack of the new + # thread at its disposal. This can be helpful when the operations will spawn + # more operations on the same executor and so on - such a situation might + # overflow the single stack in case of an ImmediateExecutor, which is + # inconsistent with how it would behave for a threaded executor. + # + # @note Intended for use primarily in testing and debugging. 
+ class IndirectImmediateExecutor < ImmediateExecutor + # Creates a new executor + def initialize + super + @internal_executor = SimpleExecutorService.new + end + + # @!macro executor_service_method_post + def post(*args, &task) + raise ArgumentError.new("no block given") unless block_given? + return false unless running? + + event = Concurrent::Event.new + @internal_executor.post do + begin + task.call(*args) + ensure + event.set + end + end + event.wait + + true + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_executor_service.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_executor_service.rb new file mode 100644 index 0000000000..5835d1f623 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_executor_service.rb @@ -0,0 +1,100 @@ +if Concurrent.on_jruby? + + require 'concurrent/errors' + require 'concurrent/utility/engine' + require 'concurrent/executor/abstract_executor_service' + + module Concurrent + + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaExecutorService < AbstractExecutorService + java_import 'java.lang.Runnable' + + FALLBACK_POLICY_CLASSES = { + abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy, + discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy, + caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy + }.freeze + private_constant :FALLBACK_POLICY_CLASSES + + def initialize(*args, &block) + super + ns_make_executor_runnable + end + + def post(*args, &task) + raise ArgumentError.new('no block given') unless block_given? + return handle_fallback(*args, &task) unless running? 
+ @executor.submit_runnable Job.new(args, task) + true + rescue Java::JavaUtilConcurrent::RejectedExecutionException + raise RejectedExecutionError + end + + def wait_for_termination(timeout = nil) + if timeout.nil? + ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok + true + else + @executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) + end + end + + def shutdown + synchronize do + self.ns_auto_terminate = false + @executor.shutdown + nil + end + end + + def kill + synchronize do + self.ns_auto_terminate = false + @executor.shutdownNow + nil + end + end + + private + + def ns_running? + !(ns_shuttingdown? || ns_shutdown?) + end + + def ns_shuttingdown? + if @executor.respond_to? :isTerminating + @executor.isTerminating + else + false + end + end + + def ns_shutdown? + @executor.isShutdown || @executor.isTerminated + end + + def ns_make_executor_runnable + if !defined?(@executor.submit_runnable) + @executor.class.class_eval do + java_alias :submit_runnable, :submit, [java.lang.Runnable.java_class] + end + end + end + + class Job + include Runnable + def initialize(args, block) + @args = args + @block = block + end + + def run + @block.call(*@args) + end + end + private_constant :Job + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_single_thread_executor.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_single_thread_executor.rb new file mode 100644 index 0000000000..1cf59b0659 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_single_thread_executor.rb @@ -0,0 +1,29 @@ +if Concurrent.on_jruby? 
+ + require 'concurrent/executor/java_executor_service' + require 'concurrent/executor/serial_executor_service' + + module Concurrent + + # @!macro single_thread_executor + # @!macro abstract_executor_service_public_api + # @!visibility private + class JavaSingleThreadExecutor < JavaExecutorService + include SerialExecutorService + + # @!macro single_thread_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + private + + def ns_initialize(opts) + @executor = java.util.concurrent.Executors.newSingleThreadExecutor + @fallback_policy = opts.fetch(:fallback_policy, :discard) + raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy) + self.auto_terminate = opts.fetch(:auto_terminate, true) + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_thread_pool_executor.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_thread_pool_executor.rb new file mode 100644 index 0000000000..6308e4f4d7 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/java_thread_pool_executor.rb @@ -0,0 +1,123 @@ +if Concurrent.on_jruby? 
+ + require 'concurrent/executor/java_executor_service' + + module Concurrent + + # @!macro thread_pool_executor + # @!macro thread_pool_options + # @!visibility private + class JavaThreadPoolExecutor < JavaExecutorService + + # @!macro thread_pool_executor_constant_default_max_pool_size + DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647 + + # @!macro thread_pool_executor_constant_default_min_pool_size + DEFAULT_MIN_POOL_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_max_queue_size + DEFAULT_MAX_QUEUE_SIZE = 0 + + # @!macro thread_pool_executor_constant_default_thread_timeout + DEFAULT_THREAD_IDLETIMEOUT = 60 + + # @!macro thread_pool_executor_attr_reader_max_length + attr_reader :max_length + + # @!macro thread_pool_executor_attr_reader_max_queue + attr_reader :max_queue + + # @!macro thread_pool_executor_method_initialize + def initialize(opts = {}) + super(opts) + end + + # @!macro executor_service_method_can_overflow_question + def can_overflow? + @max_queue != 0 + end + + # @!macro thread_pool_executor_attr_reader_min_length + def min_length + @executor.getCorePoolSize + end + + # @!macro thread_pool_executor_attr_reader_max_length + def max_length + @executor.getMaximumPoolSize + end + + # @!macro thread_pool_executor_attr_reader_length + def length + @executor.getPoolSize + end + + # @!macro thread_pool_executor_attr_reader_largest_length + def largest_length + @executor.getLargestPoolSize + end + + # @!macro thread_pool_executor_attr_reader_scheduled_task_count + def scheduled_task_count + @executor.getTaskCount + end + + # @!macro thread_pool_executor_attr_reader_completed_task_count + def completed_task_count + @executor.getCompletedTaskCount + end + + # @!macro thread_pool_executor_attr_reader_idletime + def idletime + @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS) + end + + # @!macro thread_pool_executor_attr_reader_queue_length + def queue_length + @executor.getQueue.size + end + + # @!macro 
thread_pool_executor_attr_reader_remaining_capacity + def remaining_capacity + @max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity + end + + # @!macro executor_service_method_running_question + def running? + super && !@executor.isTerminating + end + + private + + def ns_initialize(opts) + min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i + max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i + idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i + @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i + @fallback_policy = opts.fetch(:fallback_policy, :abort) + + raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE + raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length + raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy) + + if @max_queue == 0 + queue = java.util.concurrent.LinkedBlockingQueue.new + else + queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue) + end + + @executor = java.util.concurrent.ThreadPoolExecutor.new( + min_length, + max_length, + idletime, + java.util.concurrent.TimeUnit::SECONDS, + queue, + FALLBACK_POLICY_CLASSES[@fallback_policy].new) + + self.auto_terminate = opts.fetch(:auto_terminate, true) + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/ruby_executor_service.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/ruby_executor_service.rb new file mode 100644 index 
require 'concurrent/executor/abstract_executor_service'
require 'concurrent/atomic/event'

module Concurrent

  # Shared lifecycle plumbing for the pure-Ruby executors. Tracks two
  # one-way latches: one flipped when shutdown is requested, one flipped
  # when all work has actually stopped.
  #
  # @!macro abstract_executor_service_public_api
  # @!visibility private
  class RubyExecutorService < AbstractExecutorService
    safe_initialization!

    def initialize(*args, &block)
      super
      @StopEvent    = Event.new
      @StoppedEvent = Event.new
    end

    # Submit a task, or route it to the fallback policy when the executor
    # is no longer accepting work.
    def post(*args, &task)
      raise ArgumentError.new('no block given') unless block_given?
      synchronize do
        if running?
          ns_execute(*args, &task)
          true
        else
          # executor is shut down: reject this task via the fallback policy
          handle_fallback(*args, &task)
        end
      end
    end

    # Graceful stop: accept no new work; queued work may still drain.
    def shutdown
      synchronize do
        if running?
          self.ns_auto_terminate = false
          stop_event.set
          ns_shutdown_execution
        end
      end
      true
    end

    # Abrupt stop: discard pending work and mark the executor stopped.
    def kill
      synchronize do
        unless shutdown?
          self.ns_auto_terminate = false
          stop_event.set
          ns_kill_execution
          stopped_event.set
        end
      end
      true
    end

    # Block until fully stopped, or until +timeout+ seconds elapse.
    def wait_for_termination(timeout = nil)
      stopped_event.wait(timeout)
    end

    private

    # Latch set once a stop has been requested.
    def stop_event
      @StopEvent
    end

    # Latch set once the executor has fully stopped.
    def stopped_event
      @StoppedEvent
    end

    # By default stopping completes immediately; subclasses with worker
    # threads override this to drain first.
    def ns_shutdown_execution
      stopped_event.set
    end

    def ns_running?
      !stop_event.set?
    end

    # A stop was requested but the executor has not fully stopped yet.
    def ns_shuttingdown?
      !ns_running? && !ns_shutdown?
    end

    def ns_shutdown?
      stopped_event.set?
    end
  end
end
require 'concurrent/executor/ruby_thread_pool_executor'

module Concurrent

  # A pool with exactly one thread: implemented as a degenerate
  # RubyThreadPoolExecutor pinned to a single worker with an unbounded
  # task queue.
  #
  # @!macro single_thread_executor
  # @!macro abstract_executor_service_public_api
  # @!visibility private
  class RubySingleThreadExecutor < RubyThreadPoolExecutor

    # @!macro single_thread_executor_method_initialize
    def initialize(opts = {})
      fixed = {
        min_threads: 1,
        max_threads: 1,
        max_queue:   0,
        idletime:    DEFAULT_THREAD_IDLETIMEOUT
      }
      caller_controlled = {
        fallback_policy: opts.fetch(:fallback_policy, :discard),
        auto_terminate:  opts.fetch(:auto_terminate, true)
      }
      super(fixed.merge(caller_controlled))
    end
  end
end
require 'thread'
require 'concurrent/atomic/event'
require 'concurrent/concern/logging'
require 'concurrent/executor/ruby_executor_service'
require 'concurrent/utility/monotonic_time'

module Concurrent

  # Pure-Ruby thread pool. Worker threads pull [task, args] pairs from an
  # internal queue; the pool grows up to max_length, overflow work is
  # queued (bounded by max_queue unless zero = unbounded), and workers
  # idle longer than `idletime` are periodically pruned.
  #
  # @!macro thread_pool_executor
  # @!macro thread_pool_options
  # @!visibility private
  class RubyThreadPoolExecutor < RubyExecutorService

    # @!macro thread_pool_executor_constant_default_max_pool_size
    DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE

    # @!macro thread_pool_executor_constant_default_min_pool_size
    DEFAULT_MIN_POOL_SIZE = 0

    # @!macro thread_pool_executor_constant_default_max_queue_size
    # Zero means the task queue is unbounded.
    DEFAULT_MAX_QUEUE_SIZE = 0

    # @!macro thread_pool_executor_constant_default_thread_timeout
    # Seconds a worker may sit idle before becoming eligible for pruning.
    DEFAULT_THREAD_IDLETIMEOUT = 60

    # @!macro thread_pool_executor_attr_reader_max_length
    attr_reader :max_length

    # @!macro thread_pool_executor_attr_reader_min_length
    attr_reader :min_length

    # @!macro thread_pool_executor_attr_reader_idletime
    attr_reader :idletime

    # @!macro thread_pool_executor_attr_reader_max_queue
    attr_reader :max_queue

    # @!macro thread_pool_executor_method_initialize
    def initialize(opts = {})
      super(opts)
    end

    # @!macro thread_pool_executor_attr_reader_largest_length
    def largest_length
      synchronize { @largest_length }
    end

    # @!macro thread_pool_executor_attr_reader_scheduled_task_count
    def scheduled_task_count
      synchronize { @scheduled_task_count }
    end

    # @!macro thread_pool_executor_attr_reader_completed_task_count
    def completed_task_count
      synchronize { @completed_task_count }
    end

    # @!macro executor_service_method_can_overflow_question
    def can_overflow?
      synchronize { ns_limited_queue? }
    end

    # @!macro thread_pool_executor_attr_reader_length
    def length
      synchronize { @pool.length }
    end

    # @!macro thread_pool_executor_attr_reader_queue_length
    def queue_length
      synchronize { @queue.length }
    end

    # @!macro thread_pool_executor_attr_reader_remaining_capacity
    # Returns -1 when the queue is unbounded.
    def remaining_capacity
      synchronize do
        if ns_limited_queue?
          @max_queue - @queue.length
        else
          -1
        end
      end
    end

    # Callback from a Worker when it stops; removes it from the pool.
    # @!visibility private
    def remove_busy_worker(worker)
      synchronize { ns_remove_busy_worker worker }
    end

    # Callback from a Worker after finishing a task; hands it new work or
    # stashes it back in @ready.
    # @!visibility private
    def ready_worker(worker)
      synchronize { ns_ready_worker worker }
    end

    # Callback from a Worker that failed the idle test; returns it to @ready.
    # @!visibility private
    def worker_not_old_enough(worker)
      synchronize { ns_worker_not_old_enough worker }
    end

    # Callback from a Worker whose task raised a fatal (non-StandardError)
    # exception; the worker is replaced.
    # @!visibility private
    def worker_died(worker)
      synchronize { ns_worker_died worker }
    end

    # Callback from a Worker after each successfully executed task.
    # @!visibility private
    def worker_task_completed
      synchronize { @completed_task_count += 1 }
    end

    private

    # Validates options and sets up pool bookkeeping. Called under the
    # superclass lock during #initialize.
    # @!visibility private
    def ns_initialize(opts)
      @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
      @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
      @idletime   = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
      @max_queue  = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
      @fallback_policy = opts.fetch(:fallback_policy, :abort)
      raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)

      raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE
      raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE
      raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
      raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length

      self.auto_terminate = opts.fetch(:auto_terminate, true)

      @pool                 = [] # all workers
      @ready                = [] # used as a stash (most idle worker is at the start)
      @queue                = [] # used as queue
      # invariant: @ready or @queue is empty at all times — a task is only
      # queued when no worker is available, and a worker only parks in
      # @ready when no task is waiting
      @scheduled_task_count = 0
      @completed_task_count = 0
      @largest_length       = 0
      @ruby_pid             = $$ # detects if Ruby has forked

      @gc_interval  = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
      @next_gc_time = Concurrent.monotonic_time + @gc_interval
    end

    # @!visibility private
    def ns_limited_queue?
      @max_queue != 0
    end

    # Dispatch a task: prefer an idle or new worker, then the queue, then
    # the configured fallback policy. Opportunistically prunes idle
    # workers when the GC interval has elapsed.
    # @!visibility private
    def ns_execute(*args, &task)
      ns_reset_if_forked

      if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
        @scheduled_task_count += 1
      else
        handle_fallback(*args, &task)
      end

      ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
    end

    # @!visibility private
    def ns_shutdown_execution
      ns_reset_if_forked

      if @pool.empty?
        # nothing to do
        stopped_event.set
      end

      if @queue.empty?
        # no more tasks will be accepted, just stop all workers
        @pool.each(&:stop)
      end
    end

    # @!visibility private
    def ns_kill_execution
      # TODO log out unprocessed tasks in queue
      # TODO try to shutdown first?
      @pool.each(&:kill)
      @pool.clear
      @ready.clear
    end

    # tries to assign task to a worker, tries to get one from @ready or to create new one
    # @return [true, false] if task is assigned to a worker
    #
    # @!visibility private
    def ns_assign_worker(*args, &task)
      # keep growing if the pool is not at the minimum yet
      worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
      if worker
        worker << [task, args]
        true
      else
        false
      end
    rescue ThreadError
      # Raised when the operating system refuses to create the new thread
      return false
    end

    # tries to enqueue task
    # @return [true, false] if enqueued
    #
    # @!visibility private
    def ns_enqueue(*args, &task)
      if !ns_limited_queue? || @queue.size < @max_queue
        @queue << [task, args]
        true
      else
        false
      end
    end

    # A worker died mid-task: drop it and (when possible) replace it so
    # the pool stays viable.
    # @!visibility private
    def ns_worker_died(worker)
      ns_remove_busy_worker worker
      replacement_worker = ns_add_busy_worker
      ns_ready_worker replacement_worker, false if replacement_worker
    end

    # creates new worker which has to receive work to do after it's added
    # @return [nil, Worker] nil if max capacity is reached
    #
    # @!visibility private
    def ns_add_busy_worker
      return if @pool.size >= @max_length

      @pool << (worker = Worker.new(self))
      @largest_length = @pool.length if @pool.length > @largest_length
      worker
    end

    # handle ready worker, giving it new job or assigning back to @ready
    #
    # @!visibility private
    def ns_ready_worker(worker, success = true)
      task_and_args = @queue.shift
      if task_and_args
        worker << task_and_args
      else
        # stop workers when !running?, do not return them to @ready
        if running?
          @ready.push(worker)
        else
          worker.stop
        end
      end
    end

    # returns back worker to @ready which was not idle for enough time
    #
    # @!visibility private
    def ns_worker_not_old_enough(worker)
      # let's put workers coming from idle_test back to the start (as the oldest worker)
      @ready.unshift(worker)
      true
    end

    # removes a worker which is not tracked in @ready
    #
    # @!visibility private
    def ns_remove_busy_worker(worker)
      @pool.delete(worker)
      # the last worker leaving after shutdown flips the stopped latch
      stopped_event.set if @pool.empty? && !running?
      true
    end

    # try oldest worker if it is idle for enough time, it's returned back at the start
    #
    # @!visibility private
    def ns_prune_pool
      return if @pool.size <= @min_length

      last_used = @ready.shift
      last_used << :idle_test if last_used

      @next_gc_time = Concurrent.monotonic_time + @gc_interval
    end

    # After a fork only the forking thread survives, so all bookkeeping
    # referring to pre-fork worker threads must be discarded.
    def ns_reset_if_forked
      if $$ != @ruby_pid
        @queue.clear
        @ready.clear
        @pool.clear
        @scheduled_task_count = 0
        @completed_task_count = 0
        @largest_length       = 0
        @ruby_pid             = $$
      end
    end

    # One pool thread plus its private mailbox. Messages are either
    # [task, args] pairs, :idle_test, or :stop.
    # @!visibility private
    class Worker
      include Concern::Logging

      def initialize(pool)
        # instance variables accessed only under pool's lock so no need to sync here again
        @queue  = Queue.new
        @pool   = pool
        @thread = create_worker @queue, pool, pool.idletime
      end

      def <<(message)
        @queue << message
      end

      def stop
        @queue << :stop
      end

      def kill
        @thread.kill
      end

      private

      # The worker loop: block on the mailbox and dispatch on message kind.
      # `throw :stop` (also reachable from run_task) exits the loop.
      def create_worker(queue, pool, idletime)
        Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
          last_message = Concurrent.monotonic_time
          catch(:stop) do
            loop do

              case message = my_queue.pop
              when :idle_test
                if (Concurrent.monotonic_time - last_message) > my_idletime
                  my_pool.remove_busy_worker(self)
                  throw :stop
                else
                  my_pool.worker_not_old_enough(self)
                end

              when :stop
                my_pool.remove_busy_worker(self)
                throw :stop

              else
                task, args = message
                run_task my_pool, task, args
                last_message = Concurrent.monotonic_time

                my_pool.ready_worker(self)
              end
            end
          end
        end
      end

      # StandardError from a task is logged and swallowed; anything more
      # severe kills and replaces this worker via worker_died.
      def run_task(pool, task, args)
        task.call(*args)
        pool.worker_task_completed
      rescue => ex
        # let it fail
        log DEBUG, ex
      rescue Exception => ex
        log ERROR, ex
        pool.worker_died(self)
        throw :stop
      end
    end

    private_constant :Worker
  end
end
require 'concurrent/synchronization'

module Concurrent

  # A simple utility class that executes a callable and returns an array
  # of three elements:
  #   success - true when the callable ran without raising
  #   value   - the callable's result on success, nil otherwise
  #   reason  - the raised error on failure, nil otherwise
  class SafeTaskExecutor < Synchronization::LockableObject

    # @param task [#call] the callable to guard
    # @param opts [Hash] set `:rescue_exception` to true to trap `Exception`
    #   instead of only `StandardError`
    def initialize(task, opts = {})
      @task = task
      rescue_everything = opts.fetch(:rescue_exception, false)
      @exception_class  = rescue_everything ? Exception : StandardError
      super() # ensures visibility
    end

    # Run the task with the given arguments under this object's lock.
    #
    # @return [Array] `[success, value, reason]`
    def execute(*args)
      synchronize do
        begin
          [true, @task.call(*args), nil]
        rescue @exception_class => ex
          [false, nil, ex]
        end
      end
    end
  end
end
require 'concurrent/executor/executor_service'

module Concurrent

  # Indicates that the including `ExecutorService` guarantees
  # that all operations will occur in the order they are post and that no
  # two operations may occur simultaneously. This module provides no
  # functionality and provides no guarantees. That is the responsibility
  # of the including class. This module exists solely to allow the including
  # object to be interrogated for its serialization status.
  #
  # @example
  #   class Foo
  #     include Concurrent::SerialExecutor
  #   end
  #
  #   foo = Foo.new
  #
  #   foo.is_a? Concurrent::ExecutorService #=> true
  #   foo.is_a? Concurrent::SerialExecutor  #=> true
  #   foo.serialized?                       #=> true
  #
  # @!visibility private
  module SerialExecutorService
    include ExecutorService

    # @!macro executor_service_method_serialized_question
    #
    # @note Always returns `true`
    def serialized?
      true
    end
  end
end

# ---- lib/concurrent/executor/serialized_execution.rb ----

require 'concurrent/errors'
require 'concurrent/concern/logging'
require 'concurrent/synchronization'

module Concurrent

  # Ensures passed jobs in a serialized order never running at the same time.
  # At most one submitted job is in flight on its target executor; the rest
  # wait in `@stash` and are chained one at a time from `#work`'s ensure.
  class SerializedExecution < Synchronization::LockableObject
    include Concern::Logging

    def initialize()
      super()
      synchronize { ns_initialize }
    end

    # A pending unit of work: the executor it should run on, the task, and
    # the arguments to pass to it.
    Job = Struct.new(:executor, :args, :block) do
      def call
        block.call(*args)
      end
    end

    # Submit a task to the executor for asynchronous processing.
    #
    # @param [Executor] executor to be used for this job
    #
    # @param [Array] args zero or more arguments to be passed to the task
    #
    # @yield the asynchronous task to perform
    #
    # @return [Boolean] `true` if the task is queued, `false` if the executor
    #   is not running
    #
    # @raise [ArgumentError] if no task is given
    def post(executor, *args, &task)
      posts [[executor, args, task]]
      true
    end

    # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not
    # be interleaved by other tasks.
    #
    # @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
    #   first is a {ExecutorService}, second is array of args for task, third is a task (Proc)
    def posts(posts)
      # if can_overflow?
      #   raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
      # end

      return nil if posts.empty?

      jobs = posts.map { |executor, args, task| Job.new executor, args, task }

      # Under the lock decide whether we may start a job immediately: only
      # when nothing is currently being executed. Everything else is
      # stashed and picked up later by #work.
      job_to_post = synchronize do
        if @being_executed
          @stash.push(*jobs)
          nil
        else
          @being_executed = true
          @stash.push(*jobs[1..-1])
          jobs.first
        end
      end

      call_job job_to_post if job_to_post
      true
    end

    private

    def ns_initialize
      @being_executed = false
      @stash          = []
    end

    # Hand the job to its executor, wrapped in #work so completion chains
    # the next stashed job. Falls back to running inline if the executor
    # rejects the submission.
    def call_job(job)
      did_it_run = begin
        job.executor.post { work(job) }
        true
      rescue RejectedExecutionError => ex
        false
      end

      # TODO not the best idea to run it myself
      unless did_it_run
        begin
          work job
        rescue => ex
          # let it fail
          log DEBUG, ex
        end
      end
    end

    # ensures next job is executed if any is stashed
    def work(job)
      job.call
    ensure
      # Reuses the `job` binding: either the next stashed job, or false
      # (from clearing @being_executed) when the stash is empty.
      synchronize do
        job = @stash.shift || (@being_executed = false)
      end

      # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end
      # of this block
      call_job job if job
    end
  end
end
require 'delegate'
require 'concurrent/executor/serial_executor_service'
require 'concurrent/executor/serialized_execution'

module Concurrent

  # A wrapper/delegator for any `ExecutorService` that guarantees
  # serialized execution of tasks: every #post is funneled through a
  # single SerializedExecution, so no two tasks submitted via this
  # delegator ever run concurrently on the wrapped executor. All other
  # methods are forwarded to the wrapped executor.
  #
  # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html)
  # @see Concurrent::SerializedExecution
  class SerializedExecutionDelegator < SimpleDelegator
    include SerialExecutorService

    def initialize(executor)
      @serializer = SerializedExecution.new
      @executor   = executor
      super(executor)
    end

    # @!macro executor_service_method_post
    def post(*args, &task)
      raise ArgumentError.new('no block given') unless block_given?
      if running?
        @serializer.post(@executor, *args, &task)
      else
        false
      end
    end
  end
end
require 'concurrent/atomics'
require 'concurrent/executor/executor_service'

module Concurrent

  # An executor service in which every operation spawns a new,
  # independently operating thread.
  #
  # This is perhaps the most inefficient executor service in this
  # library. It exists mainly for testing and debugging. Thread creation
  # and management is expensive in Ruby and this executor performs no
  # resource pooling. This can be very beneficial during testing and
  # debugging because it decouples the using code from the underlying
  # executor implementation. In production this executor will likely
  # lead to suboptimal performance.
  #
  # @note Intended for use primarily in testing and debugging.
  class SimpleExecutorService < RubyExecutorService

    # @!macro executor_service_method_post
    def self.post(*args, &task)
      raise ArgumentError.new('no block given') unless task
      Thread.new(*args) do |*thread_args|
        Thread.current.abort_on_exception = false
        task.call(*thread_args)
      end
      true
    end

    # @!macro executor_service_method_left_shift
    def self.<<(task)
      post(&task)
      self
    end

    # @!macro executor_service_method_post
    def post(*args, &task)
      raise ArgumentError.new('no block given') unless task
      return false unless running?
      @count.increment
      Thread.new(*args) do |*thread_args|
        Thread.current.abort_on_exception = false
        begin
          task.call(*thread_args)
        ensure
          @count.decrement
          # the last in-flight thread after shutdown flips the stopped latch
          @stopped.set if @running.false? && @count.value == 0
        end
      end
    end

    # @!macro executor_service_method_left_shift
    def <<(task)
      post(&task)
      self
    end

    # @!macro executor_service_method_running_question
    def running?
      @running.true?
    end

    # @!macro executor_service_method_shuttingdown_question
    def shuttingdown?
      !@stopped.set? && @running.false?
    end

    # @!macro executor_service_method_shutdown_question
    def shutdown?
      @stopped.set?
    end

    # @!macro executor_service_method_shutdown
    def shutdown
      @running.make_false
      @stopped.set if @count.value == 0
      true
    end

    # @!macro executor_service_method_kill
    def kill
      @running.make_false
      @stopped.set
      true
    end

    # @!macro executor_service_method_wait_for_termination
    def wait_for_termination(timeout = nil)
      @stopped.wait(timeout)
    end

    private

    # State: a running flag, a terminal event, and a count of in-flight
    # task threads.
    def ns_initialize
      @running = Concurrent::AtomicBoolean.new(true)
      @stopped = Concurrent::Event.new
      @count   = Concurrent::AtomicFixnum.new(0)
    end
  end
end
Should the thread + # die for any reason it will be removed and replaced, thus ensuring that + # the executor will always remain viable and available to process jobs. + # + # A common pattern for background processing is to create a single thread + # on which an infinite loop is run. The thread's loop blocks on an input + # source (perhaps blocking I/O or a queue) and processes each input as it + # is received. This pattern has several issues. The thread itself is highly + # susceptible to errors during processing. Also, the thread itself must be + # constantly monitored and restarted should it die. `SingleThreadExecutor` + # encapsulates all these bahaviors. The task processor is highly resilient + # to errors from within tasks. Also, should the thread die it will + # automatically be restarted. + # + # The API and behavior of this class are based on Java's `SingleThreadExecutor`. + # + # @!macro abstract_executor_service_public_api + class SingleThreadExecutor < SingleThreadExecutorImplementation + + # @!macro single_thread_executor_method_initialize + # + # Create a new thread pool. 
+ # + # @option opts [Symbol] :fallback_policy (:discard) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html + + # @!method initialize(opts = {}) + # @!macro single_thread_executor_method_initialize + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/thread_pool_executor.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/thread_pool_executor.rb new file mode 100644 index 0000000000..72e1bae857 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/thread_pool_executor.rb @@ -0,0 +1,87 @@ +require 'concurrent/utility/engine' +require 'concurrent/executor/ruby_thread_pool_executor' + +module Concurrent + + if Concurrent.on_jruby? + require 'concurrent/executor/java_thread_pool_executor' + end + + ThreadPoolExecutorImplementation = case + when Concurrent.on_jruby? + JavaThreadPoolExecutor + else + RubyThreadPoolExecutor + end + private_constant :ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor + # + # An abstraction composed of one or more threads and a task queue. Tasks + # (blocks or `proc` objects) are submitted to the pool and added to the queue. + # The threads in the pool remove the tasks and execute them in the order + # they were received. + # + # A `ThreadPoolExecutor` will automatically adjust the pool size according + # to the bounds set by `min-threads` and `max-threads`. 
When a new task is + # submitted and fewer than `min-threads` threads are running, a new thread + # is created to handle the request, even if other worker threads are idle. + # If there are more than `min-threads` but less than `max-threads` threads + # running, a new thread will be created only if the queue is full. + # + # Threads that are idle for too long will be garbage collected, down to the + # configured minimum options. Should a thread crash it, too, will be garbage collected. + # + # `ThreadPoolExecutor` is based on the Java class of the same name. From + # the official Java documentation; + # + # > Thread pools address two different problems: they usually provide + # > improved performance when executing large numbers of asynchronous tasks, + # > due to reduced per-task invocation overhead, and they provide a means + # > of bounding and managing the resources, including threads, consumed + # > when executing a collection of tasks. Each ThreadPoolExecutor also + # > maintains some basic statistics, such as the number of completed tasks. + # > + # > To be useful across a wide range of contexts, this class provides many + # > adjustable parameters and extensibility hooks. However, programmers are + # > urged to use the more convenient Executors factory methods + # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), + # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single + # > background thread), that preconfigure settings for the most common usage + # > scenarios. + # + # @!macro thread_pool_options + # + # @!macro thread_pool_executor_public_api + class ThreadPoolExecutor < ThreadPoolExecutorImplementation + + # @!macro thread_pool_executor_method_initialize + # + # Create a new thread pool. + # + # @param [Hash] opts the options which configure the thread pool. 
+ # + # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum + # number of threads to be created + # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted + # and fewer than `min_threads` are running, a new thread is created + # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum + # number of seconds a thread may be idle before being reclaimed + # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum + # number of tasks allowed in the work queue at any one time; a value of + # zero means the queue may grow without bound + # @option opts [Symbol] :fallback_policy (:abort) the policy for handling new + # tasks that are received when the queue size has reached + # `max_queue` or the executor has shut down + # + # @raise [ArgumentError] if `:max_threads` is less than one + # @raise [ArgumentError] if `:min_threads` is less than zero + # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified + # in `FALLBACK_POLICIES` + # + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html + + # @!method initialize(opts = {}) + # @!macro thread_pool_executor_method_initialize + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/timer_set.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/timer_set.rb new file mode 100644 index 0000000000..bd99ef601b --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/executor/timer_set.rb @@ -0,0 +1,175 @@ +require 'concurrent/scheduled_task' +require 'concurrent/atomic/event' +require 'concurrent/collection/non_concurrent_priority_queue' +require 'concurrent/executor/executor_service' +require 'concurrent/executor/single_thread_executor' + +require 'concurrent/options' + +module Concurrent + + # 
require 'concurrent/scheduled_task'
require 'concurrent/atomic/event'
require 'concurrent/collection/non_concurrent_priority_queue'
require 'concurrent/executor/executor_service'
require 'concurrent/executor/single_thread_executor'

require 'concurrent/options'

module Concurrent

  # Executes a collection of tasks, each after a given delay. A master task
  # monitors the set and schedules each task for execution at the appropriate
  # time. Tasks are run on the global thread pool or on the supplied executor.
  # Each task is represented as a `ScheduledTask`.
  #
  # @see Concurrent::ScheduledTask
  #
  # @!macro monotonic_clock_warning
  class TimerSet < RubyExecutorService

    # Create a new set of timed tasks.
    #
    # @!macro executor_options
    #
    #   @param [Hash] opts the options used to specify the executor on which to perform actions
    #   @option opts [Executor] :executor when set use the given `Executor` instance.
    #     Three special values are also supported: `:task` returns the global task pool,
    #     `:operation` returns the global operation pool, and `:immediate` returns a new
    #     `ImmediateExecutor` object.
    def initialize(opts = {})
      super(opts)
    end

    # Post a task to be execute run after a given delay (in seconds). If the
    # delay is less than 1/100th of a second the task will be immediately post
    # to the executor.
    #
    # @param [Float] delay the number of seconds to wait for before executing the task.
    # @param [Array<Object>] args the arguments passed to the task on execution.
    #
    # @yield the task to be performed.
    #
    # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post
    #   is successful; false after shutdown.
    #
    # @raise [ArgumentError] if the intended execution time is not in the future.
    # @raise [ArgumentError] if no block is given.
    def post(delay, *args, &task)
      raise ArgumentError.new('no block given') unless block_given?
      return false unless running?
      opts = { executor:  @task_executor,
               args:      args,
               timer_set: self }
      task = ScheduledTask.execute(delay, opts, &task) # may raise exception
      task.unscheduled? ? false : task
    end

    # Begin an immediate shutdown. In-progress tasks will be allowed to
    # complete but enqueued tasks will be dismissed and no new tasks
    # will be accepted. Has no additional effect if the thread pool is
    # not running.
    def kill
      shutdown
    end

    # `<<` is meaningless without a delay, so it is hidden from the public API.
    private :<<

    private

    # Initialize the object.
    #
    # @param [Hash] opts the options to create the object with.
    # @!visibility private
    def ns_initialize(opts)
      # min-ordered priority queue: the next task due is always at the head
      @queue          = Collection::NonConcurrentPriorityQueue.new(order: :min)
      @task_executor  = Options.executor_from_options(opts) || Concurrent.global_io_executor
      @timer_executor = SingleThreadExecutor.new
      @condition      = Event.new
      @ruby_pid       = $$ # detects if Ruby has forked
      self.auto_terminate = opts.fetch(:auto_terminate, true)
    end

    # Post the task to the internal queue.
    #
    # @note This is intended as a callback method from ScheduledTask
    #   only. It is not intended to be used directly. Post a task
    #   by using the `SchedulesTask#execute` method.
    #
    # @!visibility private
    def post_task(task)
      synchronize{ ns_post_task(task) }
    end

    # @!visibility private
    def ns_post_task(task)
      return false unless ns_running?
      ns_reset_if_forked
      if (task.initial_delay) <= 0.01
        # effectively immediate: skip the timer machinery entirely
        task.executor.post{ task.process_task }
      else
        @queue.push(task)
        # only post the process method when the queue is empty
        @timer_executor.post(&method(:process_tasks)) if @queue.length == 1
        @condition.set
      end
      true
    end

    # Remove the given task from the queue.
    #
    # @note This is intended as a callback method from `ScheduledTask`
    #   only. It is not intended to be used directly. Cancel a task
    #   by using the `ScheduledTask#cancel` method.
    #
    # @!visibility private
    def remove_task(task)
      synchronize{ @queue.delete(task) }
    end

    # `ExecutorService` callback called during shutdown.
    #
    # @!visibility private
    def ns_shutdown_execution
      ns_reset_if_forked
      @queue.clear
      @timer_executor.kill
      stopped_event.set
    end

    # After a fork the timer thread no longer exists; discard its state.
    def ns_reset_if_forked
      if $$ != @ruby_pid
        @queue.clear
        @condition.reset
        @ruby_pid = $$
      end
    end

    # Run a loop and execute tasks in the scheduled order and at the approximate
    # scheduled time. If no tasks remain the thread will exit gracefully so that
    # garbage collection can occur. If there are no ready tasks it will sleep
    # for up to 60 seconds waiting for the next scheduled task.
    #
    # @!visibility private
    def process_tasks
      loop do
        task = synchronize { @condition.reset; @queue.peek }
        break unless task

        now  = Concurrent.monotonic_time
        diff = task.schedule_time - now

        if diff <= 0
          # We need to remove the task from the queue before passing
          # it to the executor, to avoid race conditions where we pass
          # the peek'ed task to the executor and then pop a different
          # one that's been added in the meantime.
          #
          # Note that there's no race condition between the peek and
          # this pop - this pop could retrieve a different task from
          # the peek, but that task would be due to fire now anyway
          # (because @queue is a priority queue, and this thread is
          # the only reader, so whatever timer is at the head of the
          # queue now must have the same pop time, or a closer one, as
          # when we peeked).
          task = synchronize { @queue.pop }
          task.executor.post{ task.process_task }
        else
          @condition.wait([diff, 60].min)
        end
      end
    end
  end
end
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/future.rb @@ -0,0 +1,141 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc. + + +module Concurrent + + # {include:file:docs-source/future.md} + # + # @!macro copy_options + # + # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module + # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future + class Future < IVar + + # Create a new `Future` in the `:unscheduled` state. + # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + def initialize(opts = {}, &block) + raise ArgumentError.new('no block given') unless block_given? + super(NULL, opts.merge(__task_from_block__: block), &nil) + end + + # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Future` is in any state other than `:unscheduled`. 
+ # + # @return [Future] a reference to `self` + # + # @example Instance and execute in separate steps + # future = Concurrent::Future.new{ sleep(1); 42 } + # future.state #=> :unscheduled + # future.execute + # future.state #=> :pending + # + # @example Instance and execute in one line + # future = Concurrent::Future.new{ sleep(1); 42 }.execute + # future.state #=> :pending + def execute + if compare_and_set_state(:pending, :unscheduled) + @executor.post{ safe_execute(@task, @args) } + self + end + end + + # Create a new `Future` object with the given block, execute it, and return the + # `:pending` object. + # + # @yield the asynchronous operation to perform + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] if no block is given + # + # @return [Future] the newly created `Future` in the `:pending` state + # + # @example + # future = Concurrent::Future.execute{ sleep(1); 42 } + # future.state #=> :pending + def self.execute(opts = {}, &block) + Future.new(opts, &block).execute + end + + # @!macro ivar_set_method + def set(value = NULL, &block) + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @task = block || Proc.new { value } + end + end + execute + end + + # Attempt to cancel the operation if it has not already processed. + # The operation can only be cancelled while still `pending`. It cannot + # be cancelled once it has begun processing or has completed. + # + # @return [Boolean] was the operation successfully cancelled. + def cancel + if compare_and_set_state(:cancelled, :pending) + complete(false, nil, CancelledOperationError.new) + true + else + false + end + end + + # Has the operation been successfully cancelled? + # + # @return [Boolean] + def cancelled? 
+ state == :cancelled + end + + # Wait the given number of seconds for the operation to complete. + # On timeout attempt to cancel the operation. + # + # @param [Numeric] timeout the maximum time in seconds to wait. + # @return [Boolean] true if the operation completed before the timeout + # else false + def wait_or_cancel(timeout) + wait(timeout) + if complete? + true + else + cancel + false + end + end + + protected + + def ns_initialize(value, opts) + super + @state = :unscheduled + @task = opts[:__task_from_block__] + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/hash.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/hash.rb new file mode 100644 index 0000000000..9919079f70 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/hash.rb @@ -0,0 +1,59 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!macro concurrent_hash + # + # A thread-safe subclass of Hash. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`, + # which takes the lock repeatedly when reading an item. + # + # @see http://ruby-doc.org/core-2.2.0/Hash.html Ruby standard library `Hash` + + # @!macro internal_implementation_note + HashImplementation = case + when Concurrent.on_cruby? + # Because MRI never runs code in parallel, the existing + # non-thread-safe structures should usually work fine. + ::Hash + + when Concurrent.on_jruby? + require 'jruby/synchronized' + + class JRubyHash < ::Hash + include JRuby::Synchronized + end + JRubyHash + + when Concurrent.on_rbx? 
+ require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxHash < ::Hash + end + ThreadSafe::Util.make_synchronized_on_rbx RbxHash + RbxHash + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubyHash < ::Hash + end + + ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash + TruffleRubyHash + + else + warn 'Possibly unsupported Ruby implementation' + ::Hash + end + private_constant :HashImplementation + + # @!macro concurrent_hash + class Hash < HashImplementation + end + +end + diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/immutable_struct.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/immutable_struct.rb new file mode 100644 index 0000000000..05b8035c02 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/immutable_struct.rb @@ -0,0 +1,93 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # A thread-safe, immutable variation of Ruby's standard `Struct`. + # + # @see http://ruby-doc.org/core-2.2.0/Struct.html Ruby standard library `Struct` + module ImmutableStruct + include Synchronization::AbstractStruct + + def self.included(base) + base.safe_initialization! 
+ end + + # @!macro struct_values + def values + ns_values + end + + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + ns_values_at(indexes) + end + + # @!macro struct_inspect + def inspect + ns_inspect + end + + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + ns_merge(other, &block) + end + + # @!macro struct_to_h + def to_h + ns_to_h + end + + # @!macro struct_get + def [](member) + ns_get(member) + end + + # @!macro struct_equality + def ==(other) + ns_equality(other) + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + ns_each(&block) + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + ns_each_pair(&block) + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? + ns_select(&block) + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block) + end + end + end.new + private_constant :FACTORY + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/ivar.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/ivar.rb new file mode 100644 index 0000000000..2a724db467 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/ivar.rb @@ -0,0 +1,207 @@ +require 'concurrent/constants' +require 
'concurrent/errors' +require 'concurrent/collection/copy_on_write_observer_set' +require 'concurrent/concern/obligation' +require 'concurrent/concern/observable' +require 'concurrent/synchronization' + +module Concurrent + + # An `IVar` is like a future that you can assign. As a future is a value that + # is being computed that you can wait on, an `IVar` is a value that is waiting + # to be assigned, that you can wait on. `IVars` are single assignment and + # deterministic. + # + # Then, express futures as an asynchronous computation that assigns an `IVar`. + # The `IVar` becomes the primitive on which [futures](Future) and + # [dataflow](Dataflow) are built. + # + # An `IVar` is a single-element container that is normally created empty, and + # can only be set once. The I in `IVar` stands for immutable. Reading an + # `IVar` normally blocks until it is set. It is safe to set and read an `IVar` + # from different threads. + # + # If you want to have some parallel task set the value in an `IVar`, you want + # a `Future`. If you want to create a graph of parallel tasks all executed + # when the values they depend on are ready you want `dataflow`. `IVar` is + # generally a low-level primitive. + # + # ## Examples + # + # Create, set and get an `IVar` + # + # ```ruby + # ivar = Concurrent::IVar.new + # ivar.set 14 + # ivar.value #=> 14 + # ivar.set 2 # would now be an error + # ``` + # + # ## See Also + # + # 1. For the theory: Arvind, R. Nikhil, and K. Pingali. + # [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). + # In Proceedings of Workshop on Graph Reduction, 1986. + # 2. For recent application: + # [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html). + class IVar < Synchronization::LockableObject + include Concern::Obligation + include Concern::Observable + + # Create a new `IVar` in the `:pending` state with the (optional) initial value. 
+ # + # @param [Object] value the initial value + # @param [Hash] opts the options to create a message with + # @option opts [String] :dup_on_deref (false) call `#dup` before returning + # the data + # @option opts [String] :freeze_on_deref (false) call `#freeze` before + # returning the data + # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc + def initialize(value = NULL, opts = {}, &block) + if value != NULL && block_given? + raise ArgumentError.new('provide only a value or a block') + end + super(&nil) + synchronize { ns_initialize(value, opts, &block) } + end + + # Add an observer on this object that will receive notification on update. + # + # Upon completion the `IVar` will notify all observers in a thread-safe way. + # The `func` method of the observer will be called with three arguments: the + # `Time` at which the `Future` completed the asynchronous operation, the + # final `value` (or `nil` on rejection), and the final `reason` (or `nil` on + # fulfillment). + # + # @param [Object] observer the object that will be notified of changes + # @param [Symbol] func symbol naming the method to call when this + # `Observable` has changes` + def add_observer(observer = nil, func = :update, &block) + raise ArgumentError.new('cannot provide both an observer and a block') if observer && block + direct_notification = false + + if block + observer = block + func = :call + end + + synchronize do + if event.set? + direct_notification = true + else + observers.add_observer(observer, func) + end + end + + observer.send(func, Time.now, self.value, reason) if direct_notification + observer + end + + # @!macro ivar_set_method + # Set the `IVar` to a value and wake or notify all threads waiting on it. 
+ # + # @!macro ivar_set_parameters_and_exceptions + # @param [Object] value the value to store in the `IVar` + # @yield A block operation to use for setting the value + # @raise [ArgumentError] if both a value and a block are given + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # + # @return [IVar] self + def set(value = NULL) + check_for_block_or_value!(block_given?, value) + raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending) + + begin + value = yield if block_given? + complete_without_notification(true, value, nil) + rescue => ex + complete_without_notification(false, nil, ex) + end + + notify_observers(self.value, reason) + self + end + + # @!macro ivar_fail_method + # Set the `IVar` to failed due to some error and wake or notify all threads waiting on it. + # + # @param [Object] reason for the failure + # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already + # been set or otherwise completed + # @return [IVar] self + def fail(reason = StandardError.new) + complete(false, nil, reason) + end + + # Attempt to set the `IVar` with the given value or block. Return a + # boolean indicating the success or failure of the set operation. + # + # @!macro ivar_set_parameters_and_exceptions + # + # @return [Boolean] true if the value was set else false + def try_set(value = NULL, &block) + set(value, &block) + true + rescue MultipleAssignmentError + false + end + + protected + + # @!visibility private + def ns_initialize(value, opts) + value = yield if block_given? 
+ init_obligation + self.observers = Collection::CopyOnWriteObserverSet.new + set_deref_options(opts) + + @state = :pending + if value != NULL + ns_complete_without_notification(true, value, nil) + end + end + + # @!visibility private + def safe_execute(task, args = []) + if compare_and_set_state(:processing, :pending) + success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, val, reason) + yield(success, val, reason) if block_given? + end + end + + # @!visibility private + def complete(success, value, reason) + complete_without_notification(success, value, reason) + notify_observers(self.value, reason) + self + end + + # @!visibility private + def complete_without_notification(success, value, reason) + synchronize { ns_complete_without_notification(success, value, reason) } + self + end + + # @!visibility private + def notify_observers(value, reason) + observers.notify_and_delete_observers{ [Time.now, value, reason] } + end + + # @!visibility private + def ns_complete_without_notification(success, value, reason) + raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state + set_state(success, value, reason) + event.set + end + + # @!visibility private + def check_for_block_or_value!(block_given, value) # :nodoc: + if (block_given && value != NULL) || (! 
block_given && value == NULL) + raise ArgumentError.new('must set with either a value or a block') + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/map.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/map.rb new file mode 100644 index 0000000000..5b7144747d --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/map.rb @@ -0,0 +1,337 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/synchronization' +require 'concurrent/utility/engine' + +module Concurrent + # @!visibility private + module Collection + + # @!visibility private + MapImplementation = case + when Concurrent.on_jruby? + # noinspection RubyResolve + JRubyMapBackend + when Concurrent.on_cruby? + require 'concurrent/collection/map/mri_map_backend' + MriMapBackend + when Concurrent.on_rbx? || Concurrent.on_truffleruby? + require 'concurrent/collection/map/atomic_reference_map_backend' + AtomicReferenceMapBackend + else + warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' + require 'concurrent/collection/map/synchronized_map_backend' + SynchronizedMapBackend + end + end + + # `Concurrent::Map` is a hash-like object and should have much better performance + # characteristics, especially under high concurrency, than `Concurrent::Hash`. + # However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash` + # -- for instance, it does not necessarily retain ordering by insertion time as `Hash` + # does. For most uses it should do fine though, and we recommend you consider + # `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs. + class Map < Collection::MapImplementation + + # @!macro map.atomic_method + # This method is atomic. 
+ + # @!macro map.atomic_method_with_block + # This method is atomic. + # @note Atomic methods taking a block do not allow the `self` instance + # to be used within the block. Doing so will cause a deadlock. + + # @!method compute_if_absent(key) + # Compute and store new value for key if the key is absent. + # @param [Object] key + # @yield new value + # @yieldreturn [Object] new value + # @return [Object] new value or current value + # @!macro map.atomic_method_with_block + + # @!method compute_if_present(key) + # Compute and store new value for key if the key is present. + # @param [Object] key + # @yield new value + # @yieldparam old_value [Object] + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method compute(key) + # Compute and store new value for key. + # @param [Object] key + # @yield compute new value from old one + # @yieldparam old_value [Object, nil] old_value, or nil when key is absent + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method merge_pair(key, value) + # If the key is absent, the value is stored, otherwise new value is + # computed with a block. 
+ # @param [Object] key + # @param [Object] value + # @yield compute new value from old one + # @yieldparam old_value [Object] old value + # @yieldreturn [Object, nil] new value, when nil the key is removed + # @return [Object, nil] new value or nil + # @!macro map.atomic_method_with_block + + # @!method replace_pair(key, old_value, new_value) + # Replaces old_value with new_value if key exists and current value + # matches old_value + # @param [Object] key + # @param [Object] old_value + # @param [Object] new_value + # @return [true, false] true if replaced + # @!macro map.atomic_method + + # @!method replace_if_exists(key, new_value) + # Replaces current value with new_value if key exists + # @param [Object] key + # @param [Object] new_value + # @return [Object, nil] old value or nil + # @!macro map.atomic_method + + # @!method get_and_set(key, value) + # Get the current value under key and set new value. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete(key) + # Delete key and its value. + # @param [Object] key + # @return [Object, nil] old value or nil when the key was absent + # @!macro map.atomic_method + + # @!method delete_pair(key, value) + # Delete pair and its value if current value equals the provided value. 
+ # @param [Object] key + # @param [Object] value + # @return [true, false] true if deleted + # @!macro map.atomic_method + + + def initialize(options = nil, &block) + if options.kind_of?(::Hash) + validate_options_hash!(options) + else + options = nil + end + + super(options) + @default_proc = block + end + + # Get a value with key + # @param [Object] key + # @return [Object] the value + def [](key) + if value = super # non-falsy value is an existing mapping, return it right away + value + # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call + # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value + # would be returned) + # note: nil == value check is not technically necessary + elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) + @default_proc.call(self, key) + else + value + end + end + + alias_method :get, :[] + # TODO (pitr-ch 30-Oct-2018): doc + alias_method :put, :[]= + + # Get a value with key, or default_value when key is absent, + # or fail when no default_value is given. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @raise [KeyError] when key is missing and no default_value is provided + # @!macro map_method_not_atomic + # @note The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended + # to be use as a concurrency primitive with strong happens-before + # guarantees. It is not intended to be used as a high-level abstraction + # supporting complex operations. 
All read and write operations are + # thread safe, but no guarantees are made regarding race conditions + # between the fetch operation and yielding to the block. Additionally, + # this method does not support recursion. This is due to internal + # constraints that are very unlikely to change in the near future. + def fetch(key, default_value = NULL) + if NULL != (value = get_or_default(key, NULL)) + value + elsif block_given? + yield key + elsif NULL != default_value + default_value + else + raise_fetch_no_key + end + end + + # Fetch value with key, or store default value when key is absent, + # or fail when no default_value is given. This is a two step operation, + # therefore not atomic. The store can overwrite other concurrently + # stored value. + # @param [Object] key + # @param [Object] default_value + # @yield default value for a key + # @yieldparam key [Object] + # @yieldreturn [Object] default value + # @return [Object] the value or default value + # @!macro map.atomic_method_with_block + def fetch_or_store(key, default_value = NULL) + fetch(key) do + put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) + end + end + + # Insert value into map with key if key is absent in one atomic step. + # @param [Object] key + # @param [Object] value + # @return [Object, nil] the value or nil when key was present + def put_if_absent(key, value) + computed = false + result = compute_if_absent(key) do + computed = true + value + end + computed ? nil : result + end unless method_defined?(:put_if_absent) + + # Is the value stored in the map. Iterates over all values. 
+ # @param [Object] value + # @return [true, false] + def value?(value) + each_value do |v| + return true if value.equal?(v) + end + false + end + + # All keys + # @return [::Array] keys + def keys + arr = [] + each_pair { |k, v| arr << k } + arr + end unless method_defined?(:keys) + + # All values + # @return [::Array] values + def values + arr = [] + each_pair { |k, v| arr << v } + arr + end unless method_defined?(:values) + + # Iterates over each key. + # @yield for each key in the map + # @yieldparam key [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_key + each_pair { |k, v| yield k } + end unless method_defined?(:each_key) + + # Iterates over each value. + # @yield for each value in the map + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_value + each_pair { |k, v| yield v } + end unless method_defined?(:each_value) + + # Iterates over each key value pair. + # @yield for each key value pair in the map + # @yieldparam key [Object] + # @yieldparam value [Object] + # @return [self] + # @!macro map.atomic_method_with_block + def each_pair + return enum_for :each_pair unless block_given? + super + end + + alias_method :each, :each_pair unless method_defined?(:each) + + # Find key of a value. + # @param [Object] value + # @return [Object, nil] key or nil when not found + def key(value) + each_pair { |k, v| return k if v == value } + nil + end unless method_defined?(:key) + alias_method :index, :key if RUBY_VERSION < '1.9' + + # Is map empty? + # @return [true, false] + def empty? + each_pair { |k, v| return false } + true + end unless method_defined?(:empty?) + + # The size of map. 
+ # @return [Integer] size + def size + count = 0 + each_pair { |k, v| count += 1 } + count + end unless method_defined?(:size) + + # @!visibility private + def marshal_dump + raise TypeError, "can't dump hash with default proc" if @default_proc + h = {} + each_pair { |k, v| h[k] = v } + h + end + + # @!visibility private + def marshal_load(hash) + initialize + populate_from(hash) + end + + undef :freeze + + # @!visibility private + def inspect + format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect + end + + private + + def raise_fetch_no_key + raise KeyError, 'key not found' + end + + def initialize_copy(other) + super + populate_from(other) + end + + def populate_from(hash) + hash.each_pair { |k, v| self[k] = v } + self + end + + def validate_options_hash!(options) + if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0) + raise ArgumentError, ":initial_capacity must be a positive Integer" + end + if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) + raise ArgumentError, ":load_factor must be a number between 0 and 1" + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/maybe.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/maybe.rb new file mode 100644 index 0000000000..7ba3d3ebb5 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/maybe.rb @@ -0,0 +1,229 @@ +require 'concurrent/synchronization' + +module Concurrent + + # A `Maybe` encapsulates an optional value. A `Maybe` either contains a value + # of (represented as `Just`), or it is empty (represented as `Nothing`). Using + # `Maybe` is a good way to deal with errors or exceptional cases without + # resorting to drastic measures such as exceptions. 
+ # + # `Maybe` is a replacement for the use of `nil` with better type checking. + # + # For compatibility with {Concurrent::Concern::Obligation} the predicate and + # accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and + # `reason`. + # + # ## Motivation + # + # A common pattern in languages with pattern matching, such as Erlang and + # Haskell, is to return *either* a value *or* an error from a function + # Consider this Erlang code: + # + # ```erlang + # case file:consult("data.dat") of + # {ok, Terms} -> do_something_useful(Terms); + # {error, Reason} -> lager:error(Reason) + # end. + # ``` + # + # In this example the standard library function `file:consult` returns a + # [tuple](http://erlang.org/doc/reference_manual/data_types.html#id69044) + # with two elements: an [atom](http://erlang.org/doc/reference_manual/data_types.html#id64134) + # (similar to a ruby symbol) and a variable containing ancillary data. On + # success it returns the atom `ok` and the data from the file. On failure it + # returns `error` and a string with an explanation of the problem. With this + # pattern there is no ambiguity regarding success or failure. If the file is + # empty the return value cannot be misinterpreted as an error. And when an + # error occurs the return value provides useful information. + # + # In Ruby we tend to return `nil` when an error occurs or else we raise an + # exception. Both of these idioms are problematic. Returning `nil` is + # ambiguous because `nil` may also be a valid value. It also lacks + # information pertaining to the nature of the error. Raising an exception + # is both expensive and usurps the normal flow of control. All of these + # problems can be solved with the use of a `Maybe`. + # + # A `Maybe` is unambiguous with regard to whether or not it contains a value. + # When `Just` it contains a value, when `Nothing` it does not. When `Just` + # the value it contains may be `nil`, which is perfectly valid. 
When + # `Nothing` the reason for the lack of a value is contained as well. The + # previous Erlang example can be duplicated in Ruby in a principled way by + # having functions return `Maybe` objects: + # + # ```ruby + # result = MyFileUtils.consult("data.dat") # returns a Maybe + # if result.just? + # do_something_useful(result.value) # or result.just + # else + # logger.error(result.reason) # or result.nothing + # end + # ``` + # + # @example Returning a Maybe from a Function + # module MyFileUtils + # def self.consult(path) + # file = File.open(path, 'r') + # Concurrent::Maybe.just(file.read) + # rescue => ex + # return Concurrent::Maybe.nothing(ex) + # ensure + # file.close if file + # end + # end + # + # maybe = MyFileUtils.consult('bogus.file') + # maybe.just? #=> false + # maybe.nothing? #=> true + # maybe.reason #=> # + # + # maybe = MyFileUtils.consult('README.md') + # maybe.just? #=> true + # maybe.nothing? #=> false + # maybe.value #=> "# Concurrent Ruby\n[![Gem Version..." + # + # @example Using Maybe with a Block + # result = Concurrent::Maybe.from do + # Client.find(10) # Client is an ActiveRecord model + # end + # + # # -- if the record was found + # result.just? #=> true + # result.value #=> # + # + # # -- if the record was not found + # result.just? #=> false + # result.reason #=> ActiveRecord::RecordNotFound + # + # @example Using Maybe with the Null Object Pattern + # # In a Rails controller... + # result = ClientService.new(10).find # returns a Maybe + # render json: result.or(NullClient.new) + # + # @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe + # @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe + class Maybe < Synchronization::Object + include Comparable + safe_initialization! + + # Indicates that the given attribute has not been set. + # When `Just` the {#nothing} getter will return `NONE`. 
+ # When `Nothing` the {#just} getter will return `NONE`. + NONE = ::Object.new.freeze + + # The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`. + attr_reader :just + + # The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`. + attr_reader :nothing + + private_class_method :new + + # Create a new `Maybe` using the given block. + # + # Runs the given block passing all function arguments to the block as block + # arguments. If the block runs to completion without raising an exception + # a new `Just` is created with the value set to the return value of the + # block. If the block raises an exception a new `Nothing` is created with + # the reason being set to the raised exception. + # + # @param [Array] args Zero or more arguments to pass to the block. + # @yield The block from which to create a new `Maybe`. + # @yieldparam [Array] args Zero or more block arguments passed as + # arguments to the function. + # + # @return [Maybe] The newly created object. + # + # @raise [ArgumentError] when no block given. + def self.from(*args) + raise ArgumentError.new('no block given') unless block_given? + begin + value = yield(*args) + return new(value, NONE) + rescue => ex + return new(NONE, ex) + end + end + + # Create a new `Just` with the given value. + # + # @param [Object] value The value to set for the new `Maybe` object. + # + # @return [Maybe] The newly created object. + def self.just(value) + return new(value, NONE) + end + + # Create a new `Nothing` with the given (optional) reason. + # + # @param [Exception] error The reason to set for the new `Maybe` object. + # When given a string a new `StandardError` will be created with the + # argument as the message. When no argument is given a new + # `StandardError` with an empty message will be created. + # + # @return [Maybe] The newly created object. 
+ def self.nothing(error = '') + if error.is_a?(Exception) + nothing = error + else + nothing = StandardError.new(error.to_s) + end + return new(NONE, nothing) + end + + # Is this `Maybe` a `Just` (successfully fulfilled with a value)? + # + # @return [Boolean] True if `Just` or false if `Nothing`. + def just? + ! nothing? + end + alias :fulfilled? :just? + + # Is this `Maybe` a `nothing` (rejected with an exception upon fulfillment)? + # + # @return [Boolean] True if `Nothing` or false if `Just`. + def nothing? + @nothing != NONE + end + alias :rejected? :nothing? + + alias :value :just + + alias :reason :nothing + + # Comparison operator. + # + # @return [Integer] 0 if self and other are both `Nothing`; + # -1 if self is `Nothing` and other is `Just`; + # 1 if self is `Just` and other is nothing; + # `self.just <=> other.just` if both self and other are `Just`. + def <=>(other) + if nothing? + other.nothing? ? 0 : -1 + else + other.nothing? ? 1 : just <=> other.just + end + end + + # Return either the value of self or the given default value. + # + # @return [Object] The value of self when `Just`; else the given default. + def or(other) + just? ? just : other + end + + private + + # Create a new `Maybe` with the given attributes. + # + # @param [Object] just The value when `Just` else `NONE`. + # @param [Exception, Object] nothing The exception when `Nothing` else `NONE`. + # + # @return [Maybe] The new `Maybe`. 
+ # + # @!visibility private + def initialize(just, nothing) + @just = just + @nothing = nothing + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mutable_struct.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mutable_struct.rb new file mode 100644 index 0000000000..836b7f4585 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mutable_struct.rb @@ -0,0 +1,229 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe variation of Ruby's standard `Struct`. Values can be set at + # construction or safely changed at any time during the object's lifecycle. + # + # @see http://ruby-doc.org/core-2.2.0/Struct.html Ruby standard library `Struct` + module MutableStruct + include Synchronization::AbstractStruct + + # @!macro struct_new + # + # Factory for creating new struct classes. + # + # ``` + # new([class_name] [, member_name]+>) -> StructClass click to toggle source + # new([class_name] [, member_name]+>) {|StructClass| block } -> StructClass + # new(value, ...) -> obj + # StructClass[value, ...] -> obj + # ``` + # + # The first two forms are used to create a new struct subclass `class_name` + # that can contain a value for each member_name . This subclass can be + # used to create instances of the structure like any other Class . + # + # If the `class_name` is omitted an anonymous struct class will be created. + # Otherwise, the name of this struct will appear as a constant in the struct class, + # so it must be unique for all structs under this base class and must start with a + # capital letter. Assigning a struct class to a constant also gives the class + # the name of the constant. 
+ # + # If a block is given it will be evaluated in the context of `StructClass`, passing + # the created class as a parameter. This is the recommended way to customize a struct. + # Subclassing an anonymous struct creates an extra anonymous class that will never be used. + # + # The last two forms create a new instance of a struct subclass. The number of value + # parameters must be less than or equal to the number of attributes defined for the + # struct. Unset parameters default to nil. Passing more parameters than number of attributes + # will raise an `ArgumentError`. + # + # @see http://ruby-doc.org/core-2.2.0/Struct.html#method-c-new Ruby standard library `Struct#new` + + # @!macro struct_values + # + # Returns the values for this struct as an Array. + # + # @return [Array] the values for this struct + # + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + # + # Returns the struct member values for each selector as an Array. + # + # A selector may be either an Integer offset or a Range of offsets (as in `Array#values_at`). + # + # @param [Fixnum, Range] indexes the index(es) from which to obatin the values (in order) + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + # + # Describe the contents of this struct in a string. + # + # @return [String] the contents of this struct in a string + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + # + # Returns a new struct containing the contents of `other` and the contents + # of `self`. If no block is specified, the value for entries with duplicate + # keys will be that of `other`. Otherwise the value for each duplicate key + # is determined by calling the block with the key, its value in `self` and + # its value in `other`. 
+ # + # @param [Hash] other the hash from which to set the new values + # @yield an options block for resolving duplicate keys + # @yieldparam [String, Symbol] member the name of the member which is duplicated + # @yieldparam [Object] selfvalue the value of the member in `self` + # @yieldparam [Object] othervalue the value of the member in `other` + # + # @return [Synchronization::AbstractStruct] a new struct with the new values + # + # @raise [ArgumentError] of given a member that is not defined in the struct + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + # + # Returns a hash containing the names and values for the struct’s members. + # + # @return [Hash] the names and values for the struct’s members + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + # + # Attribute Reference + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the member does not exist + # @raise [IndexError] if the index is out of range. + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + # + # Equality + # + # @return [Boolean] true if other has the same struct subclass and has + # equal member values (according to `Object#==`) + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + # + # Yields the value of each struct member in order. If no block is given + # an enumerator is returned. + # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + # + # Yields the name and value of each struct member in order. 
If no block is + # given an enumerator is returned. + # + # @yield the operation to be performed on each struct member/value pair + # @yieldparam [Object] member each struct member (in order) + # @yieldparam [Object] value each struct value (in order) + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + # + # Yields each member value from the struct to the block and returns an Array + # containing the member values from the struct for which the given block + # returns a true value (equivalent to `Enumerable#select`). + # + # @yield the operation to be performed on each struct member + # @yieldparam [Object] value each struct value (in order) + # + # @return [Array] an array containing each value for which the block returns true + def select(&block) + return enum_for(:select) unless block_given? + synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # Attribute Assignment + # + # Sets the value of the given struct member or the member at the given index. + # + # @param [Symbol, String, Integer] member the string or symbol name of the member + # for which to obtain the value or the member's index + # + # @return [Object] the value of the given struct member or the member at the given index. + # + # @raise [NameError] if the name does not exist + # @raise [IndexError] if the index is out of range. + def []=(member, value) + if member.is_a? 
Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize { @values[member] = value } + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(MutableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize { @values[index] = value } + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mvar.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mvar.rb new file mode 100644 index 0000000000..9034711bf5 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/mvar.rb @@ -0,0 +1,242 @@ +require 'concurrent/concern/dereferenceable' +require 'concurrent/synchronization' + +module Concurrent + + # An `MVar` is a synchronized single element container. They are empty or + # contain one item. Taking a value from an empty `MVar` blocks, as does + # putting a value into a full one. 
You can either think of them as blocking + # queue of length one, or a special kind of mutable variable. + # + # On top of the fundamental `#put` and `#take` operations, we also provide a + # `#mutate` that is atomic with respect to operations on the same instance. + # These operations all support timeouts. + # + # We also support non-blocking operations `#try_put!` and `#try_take!`, a + # `#set!` that ignores existing values, a `#value` that returns the value + # without removing it or returns `MVar::EMPTY`, and a `#modify!` that yields + # `MVar::EMPTY` if the `MVar` is empty and can be used to set `MVar::EMPTY`. + # You shouldn't use these operations in the first instance. + # + # `MVar` is a [Dereferenceable](Dereferenceable). + # + # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala. + # + # Note that unlike the original Haskell paper, our `#take` is blocking. This is how + # Haskell and Scala do it today. + # + # @!macro copy_options + # + # ## See Also + # + # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non- strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th + # ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991. + # + # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794). + # In Proceedings of the 23rd Symposium on Principles of Programming Languages + # (PoPL), 1996. + class MVar < Synchronization::Object + include Concern::Dereferenceable + safe_initialization! + + # Unique value that represents that an `MVar` was empty + EMPTY = ::Object.new + + # Unique value that represents that an `MVar` timed out before it was able + # to produce a value. + TIMEOUT = ::Object.new + + # Create a new `MVar`, either empty or with an initial value. 
+ # + # @param [Hash] opts the options controlling how the future will be processed + # + # @!macro deref_options + def initialize(value = EMPTY, opts = {}) + @value = value + @mutex = Mutex.new + @empty_condition = ConditionVariable.new + @full_condition = ConditionVariable.new + set_deref_options(opts) + end + + # Remove the value from an `MVar`, leaving it empty, and blocking if there + # isn't a value. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was taken, or `TIMEOUT` + def take(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # acquires lock on the from an `MVAR`, yields the value to provided block, + # and release lock. A timeout can be set to limit the time spent blocked, + # in which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value returned by the block, or `TIMEOUT` + def borrow(timeout = nil) + @mutex.synchronize do + wait_for_full(timeout) + + # if we timeoud out we'll still be empty + if unlocked_full? + yield @value + else + TIMEOUT + end + end + end + + # Put a value into an `MVar`, blocking if there is already a value until + # it is empty. A timeout can be set to limit the time spent blocked, in + # which case it returns `TIMEOUT` if the time is exceeded. + # @return [Object] the value that was put, or `TIMEOUT` + def put(value, timeout = nil) + @mutex.synchronize do + wait_for_empty(timeout) + + # If we timed out we won't be empty + if unlocked_empty? + @value = value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Atomically `take`, yield the value to a block for transformation, and then + # `put` the transformed value. Returns the transformed value. 
A timeout can + # be set to limit the time spent blocked, in which case it returns `TIMEOUT` + # if the time is exceeded. + # @return [Object] the transformed value, or `TIMEOUT` + def modify(timeout = nil) + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + wait_for_full(timeout) + + # If we timed out we'll still be empty + if unlocked_full? + value = @value + @value = yield value + @full_condition.signal + apply_deref_options(value) + else + TIMEOUT + end + end + end + + # Non-blocking version of `take`, that returns `EMPTY` instead of blocking. + def try_take! + @mutex.synchronize do + if unlocked_full? + value = @value + @value = EMPTY + @empty_condition.signal + apply_deref_options(value) + else + EMPTY + end + end + end + + # Non-blocking version of `put`, that returns whether or not it was successful. + def try_put!(value) + @mutex.synchronize do + if unlocked_empty? + @value = value + @full_condition.signal + true + else + false + end + end + end + + # Non-blocking version of `put` that will overwrite an existing value. + def set!(value) + @mutex.synchronize do + old_value = @value + @value = value + @full_condition.signal + apply_deref_options(old_value) + end + end + + # Non-blocking version of `modify` that will yield with `EMPTY` if there is no value yet. + def modify! + raise ArgumentError.new('no block given') unless block_given? + + @mutex.synchronize do + value = @value + @value = yield value + if unlocked_empty? + @empty_condition.signal + else + @full_condition.signal + end + apply_deref_options(value) + end + end + + # Returns if the `MVar` is currently empty. + def empty? + @mutex.synchronize { @value == EMPTY } + end + + # Returns if the `MVar` currently contains a value. + def full? + !empty? + end + + protected + + def synchronize(&block) + @mutex.synchronize(&block) + end + + private + + def unlocked_empty? + @value == EMPTY + end + + def unlocked_full? + ! unlocked_empty? 
+ end + + def wait_for_full(timeout) + wait_while(@full_condition, timeout) { unlocked_empty? } + end + + def wait_for_empty(timeout) + wait_while(@empty_condition, timeout) { unlocked_full? } + end + + def wait_while(condition, timeout) + if timeout.nil? + while yield + condition.wait(@mutex) + end + else + stop = Concurrent.monotonic_time + timeout + while yield && timeout > 0.0 + condition.wait(@mutex, timeout) + timeout = stop - Concurrent.monotonic_time + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/options.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/options.rb new file mode 100644 index 0000000000..bdd22a9df1 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/options.rb @@ -0,0 +1,42 @@ +require 'concurrent/configuration' + +module Concurrent + + # @!visibility private + module Options + + # Get the requested `Executor` based on the values set in the options hash. + # + # @param [Hash] opts the options defining the requested executor + # @option opts [Executor] :executor when set use the given `Executor` instance. + # Three special values are also supported: `:fast` returns the global fast executor, + # `:io` returns the global io executor, and `:immediate` returns a new + # `ImmediateExecutor` object. 
+ # + # @return [Executor, nil] the requested thread pool, or nil when no option specified + # + # @!visibility private + def self.executor_from_options(opts = {}) # :nodoc: + if identifier = opts.fetch(:executor, nil) + executor(identifier) + else + nil + end + end + + def self.executor(executor_identifier) + case executor_identifier + when :fast + Concurrent.global_fast_executor + when :io + Concurrent.global_io_executor + when :immediate + Concurrent.global_immediate_executor + when Concurrent::ExecutorService + executor_identifier + else + raise ArgumentError, "executor not recognized by '#{executor_identifier}'" + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promise.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promise.rb new file mode 100644 index 0000000000..f5f31ebbac --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promise.rb @@ -0,0 +1,579 @@ +require 'thread' +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/ivar' +require 'concurrent/executor/safe_task_executor' + +require 'concurrent/options' + +module Concurrent + + PromiseExecutionError = Class.new(StandardError) + + # Promises are inspired by the JavaScript [Promises/A](http://wiki.commonjs.org/wiki/Promises/A) + # and [Promises/A+](http://promises-aplus.github.io/promises-spec/) specifications. + # + # > A promise represents the eventual value returned from the single + # > completion of an operation. + # + # Promises are similar to futures and share many of the same behaviours. + # Promises are far more robust, however. Promises can be chained in a tree + # structure where each promise may have zero or more children. Promises are + # chained using the `then` method. The result of a call to `then` is always + # another promise. 
Promises are resolved asynchronously (with respect to the + # main thread) but in a strict order: parents are guaranteed to be resolved + # before their children, children before their younger siblings. The `then` + # method takes two parameters: an optional block to be executed upon parent + # resolution and an optional callable to be executed upon parent failure. The + # result of each promise is passed to each of its children upon resolution. + # When a promise is rejected all its children will be summarily rejected and + # will receive the reason. + # + # Promises have several possible states: *:unscheduled*, *:pending*, + # *:processing*, *:rejected*, or *:fulfilled*. These are also aggregated as + # `#incomplete?` and `#complete?`. When a Promise is created it is set to + # *:unscheduled*. Once the `#execute` method is called the state becomes + # *:pending*. Once a job is pulled from the thread pool's queue and is given + # to a thread for processing (often immediately upon `#post`) the state + # becomes *:processing*. The future will remain in this state until processing + # is complete. A future that is in the *:unscheduled*, *:pending*, or + # *:processing* is considered `#incomplete?`. A `#complete?` Promise is either + # *:rejected*, indicating that an exception was thrown during processing, or + # *:fulfilled*, indicating success. If a Promise is *:fulfilled* its `#value` + # will be updated to reflect the result of the operation. If *:rejected* the + # `reason` will be updated with a reference to the thrown exception. The + # predicate methods `#unscheduled?`, `#pending?`, `#rejected?`, and + # `#fulfilled?` can be called at any time to obtain the state of the Promise, + # as can the `#state` method, which returns a symbol. + # + # Retrieving the value of a promise is done through the `value` (alias: + # `deref`) method. Obtaining the value of a promise is a potentially blocking + # operation. 
When a promise is *rejected* a call to `value` will return `nil` + # immediately. When a promise is *fulfilled* a call to `value` will + # immediately return the current value. When a promise is *pending* a call to + # `value` will block until the promise is either *rejected* or *fulfilled*. A + # *timeout* value can be passed to `value` to limit how long the call will + # block. If `nil` the call will block indefinitely. If `0` the call will not + # block. Any other integer or float value will indicate the maximum number of + # seconds to block. + # + # Promises run on the global thread pool. + # + # @!macro copy_options + # + # ### Examples + # + # Start by requiring promises + # + # ```ruby + # require 'concurrent' + # ``` + # + # Then create one + # + # ```ruby + # p = Concurrent::Promise.execute do + # # do something + # 42 + # end + # ``` + # + # Promises can be chained using the `then` method. The `then` method accepts a + # block and an executor, to be executed on fulfillment, and a callable argument to be executed + # on rejection. The result of the each promise is passed as the block argument + # to chained promises. + # + # ```ruby + # p = Concurrent::Promise.new{10}.then{|x| x * 2}.then{|result| result - 10 }.execute + # ``` + # + # And so on, and so on, and so on... + # + # ```ruby + # p = Concurrent::Promise.fulfill(20). + # then{|result| result - 10 }. + # then{|result| result * 3 }. + # then(executor: different_executor){|result| result % 5 }.execute + # ``` + # + # The initial state of a newly created Promise depends on the state of its parent: + # - if parent is *unscheduled* the child will be *unscheduled* + # - if parent is *pending* the child will be *pending* + # - if parent is *fulfilled* the child will be *pending* + # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*) + # + # Promises are executed asynchronously from the main thread. 
By the time a + # child Promise finishes intialization it may be in a different state than its + # parent (by the time a child is created its parent may have completed + # execution and changed state). Despite being asynchronous, however, the order + # of execution of Promise objects in a chain (or tree) is strictly defined. + # + # There are multiple ways to create and execute a new `Promise`. Both ways + # provide identical behavior: + # + # ```ruby + # # create, operate, then execute + # p1 = Concurrent::Promise.new{ "Hello World!" } + # p1.state #=> :unscheduled + # p1.execute + # + # # create and immediately execute + # p2 = Concurrent::Promise.new{ "Hello World!" }.execute + # + # # execute during creation + # p3 = Concurrent::Promise.execute{ "Hello World!" } + # ``` + # + # Once the `execute` method is called a `Promise` becomes `pending`: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # p.state #=> :pending + # p.pending? #=> true + # ``` + # + # Wait a little bit, and the promise will resolve and provide a value: + # + # ```ruby + # p = Concurrent::Promise.execute{ "Hello, world!" } + # sleep(0.1) + # + # p.state #=> :fulfilled + # p.fulfilled? #=> true + # p.value #=> "Hello, world!" + # ``` + # + # If an exception occurs, the promise will be rejected and will provide + # a reason for the rejection: + # + # ```ruby + # p = Concurrent::Promise.execute{ raise StandardError.new("Here comes the Boom!") } + # sleep(0.1) + # + # p.state #=> :rejected + # p.rejected? #=> true + # p.reason #=> "#" + # ``` + # + # #### Rejection + # + # When a promise is rejected all its children will be rejected and will + # receive the rejection `reason` as the rejection callable parameter: + # + # ```ruby + # p = Concurrent::Promise.execute { Thread.pass; raise StandardError } + # + # c1 = p.then(-> reason { 42 }) + # c2 = p.then(-> reason { raise 'Boom!' 
}) + # + # c1.wait.state #=> :fulfilled + # c1.value #=> 45 + # c2.wait.state #=> :rejected + # c2.reason #=> # + # ``` + # + # Once a promise is rejected it will continue to accept children that will + # receive immediately rejection (they will be executed asynchronously). + # + # #### Aliases + # + # The `then` method is the most generic alias: it accepts a block to be + # executed upon parent fulfillment and a callable to be executed upon parent + # rejection. At least one of them should be passed. The default block is `{ + # |result| result }` that fulfills the child with the parent value. The + # default callable is `{ |reason| raise reason }` that rejects the child with + # the parent reason. + # + # - `on_success { |result| ... }` is the same as `then {|result| ... }` + # - `rescue { |reason| ... }` is the same as `then(Proc.new { |reason| ... } )` + # - `rescue` is aliased by `catch` and `on_error` + class Promise < IVar + + # Initialize a new Promise with the provided options. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @option opts [Promise] :parent the parent `Promise` when building a chain/tree + # @option opts [Proc] :on_fulfill fulfillment handler + # @option opts [Proc] :on_reject rejection handler + # @option opts [object, Array] :args zero or more arguments to be passed + # the task block on execution + # + # @yield The block operation to be performed asynchronously. + # + # @raise [ArgumentError] if no block is given + # + # @see http://wiki.commonjs.org/wiki/Promises/A + # @see http://promises-aplus.github.io/promises-spec/ + def initialize(opts = {}, &block) + opts.delete_if { |k, v| v.nil? } + super(NULL, opts.merge(__promise_body_from_block__: block), &nil) + end + + # Create a new `Promise` and fulfill it immediately. 
+ # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.fulfill(value, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, true, value, nil) } + end + + # Create a new `Promise` and reject it immediately. + # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @raise [ArgumentError] if no block is given + # + # @return [Promise] the newly created `Promise` + def self.reject(reason, opts = {}) + Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, false, nil, reason) } + end + + # Execute an `:unscheduled` `Promise`. Immediately sets the state to `:pending` and + # passes the block to a new thread/thread pool for eventual execution. + # Does nothing if the `Promise` is in any state other than `:unscheduled`. + # + # @return [Promise] a reference to `self` + def execute + if root? + if compare_and_set_state(:pending, :unscheduled) + set_pending + realize(@promise_body) + end + else + @parent.execute + end + self + end + + # @!macro ivar_set_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def set(value = NULL, &block) + raise PromiseExecutionError.new('supported only on root promise') unless root? + check_for_block_or_value!(block_given?, value) + synchronize do + if @state != :unscheduled + raise MultipleAssignmentError + else + @promise_body = block || Proc.new { |result| value } + end + end + execute + end + + # @!macro ivar_fail_method + # + # @raise [Concurrent::PromiseExecutionError] if not the root promise + def fail(reason = StandardError.new) + set { raise reason } + end + + # Create a new `Promise` object with the given block, execute it, and return the + # `:pending` object. 
+ # + # @!macro executor_and_deref_options + # + # @!macro promise_init_options + # + # @return [Promise] the newly created `Promise` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + # + # @example + # promise = Concurrent::Promise.execute{ sleep(1); 42 } + # promise.state #=> :pending + def self.execute(opts = {}, &block) + new(opts, &block).execute + end + + # Chain a new promise off the current promise. + # + # @return [Promise] the new promise + # @yield The block operation to be performed asynchronously. + # @overload then(rescuer, executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + # @overload then(rescuer, executor: executor, &block) + # @param [Proc] rescuer An optional rescue block to be executed if the + # promise is rejected. + # @param [ThreadPool] executor An optional thread pool executor to be used + # in the new Promise + def then(*args, &block) + if args.last.is_a?(::Hash) + executor = args.pop[:executor] + rescuer = args.first + else + rescuer, executor = args + end + + executor ||= @executor + + raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given? + block = Proc.new { |result| result } unless block_given? + child = Promise.new( + parent: self, + executor: executor, + on_fulfill: block, + on_reject: rescuer + ) + + synchronize do + child.state = :pending if @state == :pending + child.on_fulfill(apply_deref_options(@value)) if @state == :fulfilled + child.on_reject(@reason) if @state == :rejected + @children << child + end + + child + end + + # Chain onto this promise an action to be undertaken on success + # (fulfillment). + # + # @yield The block to execute + # + # @return [Promise] self + def on_success(&block) + raise ArgumentError.new('no block given') unless block_given? 
+ self.then(&block) + end + + # Chain onto this promise an action to be undertaken on failure + # (rejection). + # + # @yield The block to execute + # + # @return [Promise] self + def rescue(&block) + self.then(block) + end + + alias_method :catch, :rescue + alias_method :on_error, :rescue + + # Yield the successful result to the block that returns a promise. If that + # promise is also successful the result is the result of the yielded promise. + # If either part fails the whole also fails. + # + # @example + # Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! #=> 3 + # + # @return [Promise] + def flat_map(&block) + child = Promise.new( + parent: self, + executor: ImmediateExecutor.new, + ) + + on_error { |e| child.on_reject(e) } + on_success do |result1| + begin + inner = block.call(result1) + inner.execute + inner.on_success { |result2| child.on_fulfill(result2) } + inner.on_error { |e| child.on_reject(e) } + rescue => e + child.on_reject(e) + end + end + + child + end + + # Builds a promise that produces the result of promises in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] promises + # + # @overload zip(*promises, opts) + # @param [Array] promises + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def self.zip(*promises) + opts = promises.last.is_a?(::Hash) ? 
promises.pop.dup : {} + opts[:executor] ||= ImmediateExecutor.new + zero = if !opts.key?(:execute) || opts.delete(:execute) + fulfill([], opts) + else + Promise.new(opts) { [] } + end + + promises.reduce(zero) do |p1, p2| + p1.flat_map do |results| + p2.then do |next_result| + results << next_result + end + end + end + end + + # Builds a promise that produces the result of self and others in an Array + # and fails if any of them fails. + # + # @overload zip(*promises) + # @param [Array] others + # + # @overload zip(*promises, opts) + # @param [Array] others + # @param [Hash] opts the configuration options + # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. + # @option opts [Boolean] :execute (true) execute promise before returning + # + # @return [Promise] + def zip(*others) + self.class.zip(self, *others) + end + + # Aggregates a collection of promises and executes the `then` condition + # if all aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. + # + # @!macro promise_self_aggregate + # + # The returned promise will not yet have been executed. Additional `#then` + # and `#rescue` handlers may still be provided. Once the returned promise + # is execute the aggregate promises will be also be executed (if they have + # not been executed already). The results of the aggregate promises will + # be checked upon completion. The necessary `#then` and `#rescue` blocks + # on the aggregating promise will then be executed as appropriate. If the + # `#rescue` handlers are executed the raises exception will be + # `Concurrent::PromiseExecutionError`. 
+ # + # @param [Array] promises Zero or more promises to aggregate + # @return [Promise] an unscheduled (not executed) promise that aggregates + # the promises given as arguments + def self.all?(*promises) + aggregate(:all?, *promises) + end + + # Aggregates a collection of promises and executes the `then` condition + # if any aggregated promises succeed. Executes the `rescue` handler with + # a `Concurrent::PromiseExecutionError` if any of the aggregated promises + # fail. Upon execution will execute any of the aggregate promises that + # were not already executed. + # + # @!macro promise_self_aggregate + def self.any?(*promises) + aggregate(:any?, *promises) + end + + protected + + def ns_initialize(value, opts) + super + + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + @args = get_arguments_from(opts) + + @parent = opts.fetch(:parent) { nil } + @on_fulfill = opts.fetch(:on_fulfill) { Proc.new { |result| result } } + @on_reject = opts.fetch(:on_reject) { Proc.new { |reason| raise reason } } + + @promise_body = opts[:__promise_body_from_block__] || Proc.new { |result| result } + @state = :unscheduled + @children = [] + end + + # Aggregate a collection of zero or more promises under a composite promise, + # execute the aggregated promises and collect them into a standard Ruby array, + # call the given Ruby `Ennnumerable` predicate (such as `any?`, `all?`, `none?`, + # or `one?`) on the collection checking for the success or failure of each, + # then executing the composite's `#then` handlers if the predicate returns + # `true` or executing the composite's `#rescue` handlers if the predicate + # returns false. + # + # @!macro promise_self_aggregate + def self.aggregate(method, *promises) + composite = Promise.new do + completed = promises.collect do |promise| + promise.execute if promise.unscheduled? + promise.wait + promise + end + unless completed.empty? || completed.send(method){|promise| promise.fulfilled? 
} + raise PromiseExecutionError + end + end + composite + end + + # @!visibility private + def set_pending + synchronize do + @state = :pending + @children.each { |c| c.set_pending } + end + end + + # @!visibility private + def root? # :nodoc: + @parent.nil? + end + + # @!visibility private + def on_fulfill(result) + realize Proc.new { @on_fulfill.call(result) } + nil + end + + # @!visibility private + def on_reject(reason) + realize Proc.new { @on_reject.call(reason) } + nil + end + + # @!visibility private + def notify_child(child) + if_state(:fulfilled) { child.on_fulfill(apply_deref_options(@value)) } + if_state(:rejected) { child.on_reject(@reason) } + end + + # @!visibility private + def complete(success, value, reason) + children_to_notify = synchronize do + set_state!(success, value, reason) + @children.dup + end + + children_to_notify.each { |child| notify_child(child) } + observers.notify_and_delete_observers{ [Time.now, self.value, reason] } + end + + # @!visibility private + def realize(task) + @executor.post do + success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) + complete(success, value, reason) + end + end + + # @!visibility private + def set_state!(success, value, reason) + set_state(success, value, reason) + event.set + end + + # @!visibility private + def synchronized_set_state!(success, value, reason) + synchronize { set_state!(success, value, reason) } + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promises.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promises.rb new file mode 100644 index 0000000000..5c5853ceb4 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/promises.rb @@ -0,0 +1,1936 @@ +require 'concurrent/synchronization' +require 'concurrent/atomic/atomic_boolean' +require 
'concurrent/atomic/atomic_fixnum' +require 'concurrent/collection/lock_free_stack' +require 'concurrent/errors' +require 'concurrent/re_include' + +module Concurrent + + # {include:file:docs-source/promises-main.md} + module Promises + + # @!macro promises.param.default_executor + # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the + # global executor. Default executor propagates to chained futures unless overridden with + # executor parameter or changed with {AbstractEventFuture#with_default_executor}. + # + # @!macro promises.param.executor + # @param [Executor, :io, :fast] executor Instance of an executor or a name of the + # global executor. The task is executed on it, default executor remains unchanged. + # + # @!macro promises.param.args + # @param [Object] args arguments which are passed to the task when it's executed. + # (It might be prepended with other arguments, see the @yeild section). + # + # @!macro promises.shortcut.on + # Shortcut of {#$0_on} with default `:io` executor supplied. + # @see #$0_on + # + # @!macro promises.shortcut.using + # Shortcut of {#$0_using} with default `:io` executor supplied. + # @see #$0_using + # + # @!macro promise.param.task-future + # @yieldreturn will become result of the returned Future. + # Its returned value becomes {Future#value} fulfilling it, + # raised exception becomes {Future#reason} rejecting it. + # + # @!macro promise.param.callback + # @yieldreturn is forgotten. + + # Container of all {Future}, {Event} factory methods. They are never constructed directly with + # new. + module FactoryMethods + extend ReInclude + + module Configuration + # @return [Executor, :io, :fast] the executor which is used when none is supplied + # to a factory method. 
The method can be overridden in the receivers of + # `include FactoryMethod` + def default_executor + :io + end + end + + include Configuration + + # @!macro promises.shortcut.on + # @return [ResolvableEvent] + def resolvable_event + resolvable_event_on default_executor + end + + # Created resolvable event, user is responsible for resolving the event once by + # {Promises::ResolvableEvent#resolve}. + # + # @!macro promises.param.default_executor + # @return [ResolvableEvent] + def resolvable_event_on(default_executor = self.default_executor) + ResolvableEventPromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [ResolvableFuture] + def resolvable_future + resolvable_future_on default_executor + end + + # Creates resolvable future, user is responsible for resolving the future once by + # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill}, + # or {Promises::ResolvableFuture#reject} + # + # @!macro promises.param.default_executor + # @return [ResolvableFuture] + def resolvable_future_on(default_executor = self.default_executor) + ResolvableFuturePromise.new(default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def future(*args, &task) + future_on(default_executor, *args, &task) + end + + # @!macro promises.future-on1 + # Constructs new Future which will be resolved after block is evaluated on default executor. + # Evaluation begins immediately. + # + # @!macro promises.future-on2 + # @!macro promises.param.default_executor + # @!macro promises.param.args + # @yield [*args] to the task. + # @!macro promise.param.task-future + # @return [Future] + def future_on(default_executor, *args, &task) + ImmediateEventPromise.new(default_executor).future.then(*args, &task) + end + + # Creates resolved future with will be either fulfilled with the given value or rejection with + # the given reason. 
+ # + # @!macro promises.param.default_executor + # @return [Future] + def resolved_future(fulfilled, value, reason, default_executor = self.default_executor) + ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future + end + + # Creates resolved future with will be fulfilled with the given value. + # + # @!macro promises.param.default_executor + # @return [Future] + def fulfilled_future(value, default_executor = self.default_executor) + resolved_future true, value, nil, default_executor + end + + # Creates resolved future with will be rejected with the given reason. + # + # @!macro promises.param.default_executor + # @return [Future] + def rejected_future(reason, default_executor = self.default_executor) + resolved_future false, nil, reason, default_executor + end + + # Creates resolved event. + # + # @!macro promises.param.default_executor + # @return [Event] + def resolved_event(default_executor = self.default_executor) + ImmediateEventPromise.new(default_executor).event + end + + # General constructor. Behaves differently based on the argument's type. It's provided for convenience + # but it's better to be explicit. + # + # @see rejected_future, resolved_event, fulfilled_future + # @!macro promises.param.default_executor + # @return [Event, Future] + # + # @overload create(nil, default_executor = self.default_executor) + # @param [nil] nil + # @return [Event] resolved event. + # + # @overload create(a_future, default_executor = self.default_executor) + # @param [Future] a_future + # @return [Future] a future which will be resolved when a_future is. + # + # @overload create(an_event, default_executor = self.default_executor) + # @param [Event] an_event + # @return [Event] an event which will be resolved when an_event is. + # + # @overload create(exception, default_executor = self.default_executor) + # @param [Exception] exception + # @return [Future] a rejected future with the exception as its reason. 
+ # + # @overload create(value, default_executor = self.default_executor) + # @param [Object] value when none of the above overloads fits + # @return [Future] a fulfilled future with the value. + def make_future(argument = nil, default_executor = self.default_executor) + case argument + when AbstractEventFuture + # returning wrapper would change nothing + argument + when Exception + rejected_future argument, default_executor + when nil + resolved_event default_executor + else + fulfilled_future argument, default_executor + end + end + + # @!macro promises.shortcut.on + # @return [Future] + def delay(*args, &task) + delay_on default_executor, *args, &task + end + + # @!macro promises.future-on1 + # The task will be evaluated only after the future is touched, see {AbstractEventFuture#touch} + # + # @!macro promises.future-on2 + def delay_on(default_executor, *args, &task) + DelayPromise.new(default_executor).event.chain(*args, &task) + end + + # @!macro promises.shortcut.on + # @return [Future] + def schedule(intended_time, *args, &task) + schedule_on default_executor, intended_time, *args, &task + end + + # @!macro promises.future-on1 + # The task is planned for execution in intended_time. + # + # @!macro promises.future-on2 + # @!macro promises.param.intended_time + # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. + # `Time` means to run on `intended_time`. + def schedule_on(default_executor, intended_time, *args, &task) + ScheduledPromise.new(default_executor, intended_time).event.chain(*args, &task) + end + + # @!macro promises.shortcut.on + # @return [Future] + def zip_futures(*futures_and_or_events) + zip_futures_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after all futures_and_or_events are resolved. + # Its value is array of zipped future values. Its reason is array of reasons for rejection. + # If there is an error it rejects. 
+ # @!macro promises.event-conversion + # If event is supplied, which does not have value and can be only resolved, it's + # represented as `:fulfilled` with value `nil`. + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def zip_futures_on(default_executor, *futures_and_or_events) + ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + alias_method :zip, :zip_futures + + # @!macro promises.shortcut.on + # @return [Event] + def zip_events(*futures_and_or_events) + zip_events_on default_executor, *futures_and_or_events + end + + # Creates new event which is resolved after all futures_and_or_events are resolved. + # (Future is resolved when fulfilled or rejected.) + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def zip_events_on(default_executor, *futures_and_or_events) + ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_resolved_future(*futures_and_or_events) + any_resolved_future_on default_executor, *futures_and_or_events + end + + alias_method :any, :any_resolved_future + + # Creates new future which is resolved after first futures_and_or_events is resolved. + # Its result equals result of the first resolved future. + # @!macro promises.any-touch + # If resolved it does not propagate {AbstractEventFuture#touch}, leaving delayed + # futures un-executed if they are not required any more. 
+ # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_resolved_future_on(default_executor, *futures_and_or_events) + AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_fulfilled_future(*futures_and_or_events) + any_fulfilled_future_on default_executor, *futures_and_or_events + end + + # Creates new future which is resolved after first of futures_and_or_events is fulfilled. + # Its result equals result of the first resolved future or if all futures_and_or_events reject, + # it has reason of the last resolved future. + # @!macro promises.any-touch + # @!macro promises.event-conversion + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Future] + def any_fulfilled_future_on(default_executor, *futures_and_or_events) + AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def any_event(*futures_and_or_events) + any_event_on default_executor, *futures_and_or_events + end + + # Creates new event which becomes resolved after first of the futures_and_or_events resolves. + # @!macro promises.any-touch + # + # @!macro promises.param.default_executor + # @param [AbstractEventFuture] futures_and_or_events + # @return [Event] + def any_event_on(default_executor, *futures_and_or_events) + AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event + end + + # TODO consider adding first(count, *futures) + # TODO consider adding zip_by(slice, *futures) processing futures in slices + # TODO or rather a generic aggregator taking a function + end + + module InternalStates + # @private + class State + def resolved? 
+ raise NotImplementedError + end + + def to_sym + raise NotImplementedError + end + end + + private_constant :State + + # @private + class Pending < State + def resolved? + false + end + + def to_sym + :pending + end + end + + private_constant :Pending + + # @private + class ResolvedWithResult < State + def resolved? + true + end + + def to_sym + :resolved + end + + def result + [fulfilled?, value, reason] + end + + def fulfilled? + raise NotImplementedError + end + + def value + raise NotImplementedError + end + + def reason + raise NotImplementedError + end + + def apply + raise NotImplementedError + end + end + + private_constant :ResolvedWithResult + + # @private + class Fulfilled < ResolvedWithResult + + def initialize(value) + @Value = value + end + + def fulfilled? + true + end + + def apply(args, block) + block.call value, *args + end + + def value + @Value + end + + def reason + nil + end + + def to_sym + :fulfilled + end + end + + private_constant :Fulfilled + + # @private + class FulfilledArray < Fulfilled + def apply(args, block) + block.call(*value, *args) + end + end + + private_constant :FulfilledArray + + # @private + class Rejected < ResolvedWithResult + def initialize(reason) + @Reason = reason + end + + def fulfilled? + false + end + + def value + nil + end + + def reason + @Reason + end + + def to_sym + :rejected + end + + def apply(args, block) + block.call reason, *args + end + end + + private_constant :Rejected + + # @private + class PartiallyRejected < ResolvedWithResult + def initialize(value, reason) + super() + @Value = value + @Reason = reason + end + + def fulfilled? 
+ false + end + + def to_sym + :rejected + end + + def value + @Value + end + + def reason + @Reason + end + + def apply(args, block) + block.call(*reason, *args) + end + end + + private_constant :PartiallyRejected + + PENDING = Pending.new + RESOLVED = Fulfilled.new(nil) + + def RESOLVED.to_sym + :resolved + end + + private_constant :PENDING, :RESOLVED + end + + private_constant :InternalStates + + # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. + class AbstractEventFuture < Synchronization::Object + safe_initialization! + private(*attr_atomic(:internal_state) - [:internal_state]) + + include InternalStates + + def initialize(promise, default_executor) + super() + @Lock = Mutex.new + @Condition = ConditionVariable.new + @Promise = promise + @DefaultExecutor = default_executor + @Callbacks = LockFreeStack.new + @Waiters = AtomicFixnum.new 0 + self.internal_state = PENDING + end + + private :initialize + + # @!macro promises.shortcut.event-future + # @see Event#$0 + # @see Future#$0 + + # @!macro promises.param.timeout + # @param [Numeric] timeout the maximum time in second to wait. + + # @!macro promises.warn.blocks + # @note This function potentially blocks current thread until the Future is resolved. + # Be careful it can deadlock. Try to chain instead. + + # Returns its state. + # @return [Symbol] + # + # @overload an_event.state + # @return [:pending, :resolved] + # @overload a_future.state + # Both :fulfilled, :rejected implies :resolved. + # @return [:pending, :fulfilled, :rejected] + def state + internal_state.to_sym + end + + # Is it in pending state? + # @return [Boolean] + def pending? + !internal_state.resolved? + end + + # Is it in resolved state? + # @return [Boolean] + def resolved? + internal_state.resolved? + end + + # Propagates touch. Requests all the delayed futures, which it depends on, to be + # executed. This method is called by any other method requiring resolved state, like {#wait}. 
+ # @return [self] + def touch + @Promise.touch + self + end + + # @!macro promises.touches + # Calls {AbstractEventFuture#touch}. + + # @!macro promises.method.wait + # Wait (block the Thread) until receiver is {#resolved?}. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Future, true, false] self implies timeout was not used, true implies timeout was used + # and it was resolved, false implies it was not resolved within timeout. + def wait(timeout = nil) + result = wait_until_resolved(timeout) + timeout ? result : self + end + + # Returns default executor. + # @return [Executor] default executor + # @see #with_default_executor + # @see FactoryMethods#future_on + # @see FactoryMethods#resolvable_future + # @see FactoryMethods#any_fulfilled_future_on + # @see similar + def default_executor + @DefaultExecutor + end + + # @!macro promises.shortcut.on + # @return [Future] + def chain(*args, &task) + chain_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @return [Future] + # @!macro promise.param.task-future + # + # @overload an_event.chain_on(executor, *args, &task) + # @yield [*args] to the task. + # @overload a_future.chain_on(executor, *args, &task) + # @yield [fulfilled, value, reason, *args] to the task. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason + def chain_on(executor, *args, &task) + ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @return [String] Short string representation. + def to_s + format '%s %s>', super[0..-2], state + end + + alias_method :inspect, :to_s + + # Resolves the resolvable when receiver is resolved. + # + # @param [Resolvable] resolvable + # @return [self] + def chain_resolvable(resolvable) + on_resolution! 
{ resolvable.resolve_with internal_state } + end + + alias_method :tangle, :chain_resolvable + + # @!macro promises.shortcut.using + # @return [self] + def on_resolution(*args, &callback) + on_resolution_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # resolved. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution!(*args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution!(*args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason + def on_resolution!(*args, &callback) + add_callback :callback_on_resolution, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is resolved. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # + # @overload an_event.on_resolution_using(executor, *args, &callback) + # @yield [*args] to the callback. + # @overload a_future.on_resolution_using(executor, *args, &callback) + # @yield [fulfilled, value, reason, *args] to the callback. + # @yieldparam [true, false] fulfilled + # @yieldparam [Object] value + # @yieldparam [Exception] reason + def on_resolution_using(executor, *args, &callback) + add_callback :async_callback_on_resolution, executor, args, callback + end + + # @!macro promises.method.with_default_executor + # Crates new object with same class with the executor set as its new default executor. + # Any futures depending on it will use the new default executor. 
+ # @!macro promises.shortcut.event-future + # @abstract + # @return [AbstractEventFuture] + def with_default_executor(executor) + raise NotImplementedError + end + + # @!visibility private + def resolve_with(state, raise_on_reassign = true) + if compare_and_set_internal_state(PENDING, state) + # go to synchronized block only if there were waiting threads + @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 + call_callbacks state + else + return rejected_resolution(raise_on_reassign, state) + end + self + end + + # For inspection. + # @!visibility private + # @return [Array] + def blocks + @Callbacks.each_with_object([]) do |(method, args), promises| + promises.push(args[0]) if method == :callback_notify_blocked + end + end + + # For inspection. + # @!visibility private + def callbacks + @Callbacks.each.to_a + end + + # For inspection. + # @!visibility private + def promise + @Promise + end + + # For inspection. + # @!visibility private + def touched? + promise.touched? + end + + # For inspection. + # @!visibility private + def waiting_threads + @Waiters.each.to_a + end + + # @!visibility private + def add_callback_notify_blocked(promise, index) + add_callback :callback_notify_blocked, promise, index + end + + # @!visibility private + def add_callback_clear_delayed_node(node) + add_callback(:callback_clear_delayed_node, node) + end + + private + + def add_callback(method, *args) + state = internal_state + if state.resolved? + call_callback method, state, args + else + @Callbacks.push [method, args] + state = internal_state + # take back if it was resolved in the meanwhile + call_callbacks state if state.resolved? + end + self + end + + def callback_clear_delayed_node(state, node) + node.value = nil + end + + # @return [Boolean] + def wait_until_resolved(timeout) + return true if resolved? + + touch + + @Lock.synchronize do + @Waiters.increment + begin + unless resolved? 
+ @Condition.wait @Lock, timeout + end + ensure + # JRuby may raise ConcurrencyError + @Waiters.decrement + end + end + resolved? + end + + def call_callback(method, state, args) + self.send method, state, *args + end + + def call_callbacks(state) + method, args = @Callbacks.pop + while method + call_callback method, state, args + method, args = @Callbacks.pop + end + end + + def with_async(executor, *args, &block) + Concurrent.executor(executor).post(*args, &block) + end + + def async_callback_on_resolution(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_resolution st, ar, cb + end + end + + def callback_notify_blocked(state, promise, index) + promise.on_blocker_resolution self, index + end + end + + # Represents an event which will happen in future (will be resolved). The event is either + # pending or resolved. It should be always resolved. Use {Future} to communicate rejections and + # cancellation. + class Event < AbstractEventFuture + + alias_method :then, :chain + + + # @!macro promises.method.zip + # Creates a new event or a future which will be resolved when receiver and other are. + # Returns an event if receiver and other are events, otherwise returns a future. + # If just one of the parties is Future then the result + # of the returned future is equal to the result of the supplied future. If both are futures + # then the result is as described in {FactoryMethods#zip_futures_on}. + # + # @return [Future, Event] + def zip(other) + if other.is_a?(Future) + ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future + else + ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. 
+ # + # @return [Event] + def any(event_or_future) + AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event + end + + alias_method :|, :any + + # Creates new event dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. + # + # @return [Event] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end + + # @!macro promise.method.schedule + # Creates new event dependent on receiver scheduled to execute on/in intended_time. + # In time is interpreted from the moment the receiver is resolved, therefore it inserts + # delay into the chain. + # + # @!macro promises.param.intended_time + # @return [Event] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event + end.flat_event + end + + # Converts event to a future. The future is fulfilled when the event is resolved, the future may never fail. + # + # @return [Future] + def to_future + future = Promises.resolvable_future + ensure + chain_resolvable(future) + end + + # Returns self, since this is event + # @return [Event] + def to_event + self + end + + # @!macro promises.method.with_default_executor + # @return [Event] + def with_default_executor(executor) + EventWrapperPromise.new_blocked_by1(self, executor).event + end + + private + + def rejected_resolution(raise_on_reassign, state) + Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign + return false + end + + def callback_on_resolution(state, args, callback) + callback.call(*args) + end + end + + # Represents a value which will become available in future. May reject with a reason instead, + # e.g. when the tasks raises an exception. 
+ class Future < AbstractEventFuture + + # Is it in fulfilled state? + # @return [Boolean] + def fulfilled? + state = internal_state + state.resolved? && state.fulfilled? + end + + # Is it in rejected state? + # @return [Boolean] + def rejected? + state = internal_state + state.resolved? && !state.fulfilled? + end + + # @!macro promises.warn.nil + # @note Make sure returned `nil` is not confused with timeout, no value when rejected, + # no reason when fulfilled, etc. + # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. + + # @!macro promises.method.value + # Return value of the future. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @return [Object, nil] the value of the Future when fulfilled, nil on timeout or rejection. + def value(timeout = nil) + internal_state.value if wait_until_resolved timeout + end + + # Returns reason of future's rejection. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.warn.nil + # @!macro promises.param.timeout + # @return [Exception, nil] nil on timeout or fulfillment. + def reason(timeout = nil) + internal_state.reason if wait_until_resolved timeout + end + + # Returns triplet fulfilled?, value, reason. + # @!macro promises.touches + # + # @!macro promises.warn.blocks + # @!macro promises.param.timeout + # @return [Array(Boolean, Object, Exception), nil] triplet of fulfilled?, value, reason, or nil + # on timeout. + def result(timeout = nil) + internal_state.result if wait_until_resolved timeout + end + + # @!macro promises.method.wait + # @raise [Exception] {#reason} on rejection + def wait!(timeout = nil) + result = wait_until_resolved!(timeout) + timeout ? result : self + end + + # @!macro promises.method.value + # @return [Object, nil] the value of the Future when fulfilled, nil on timeout. 
+ # @raise [Exception] {#reason} on rejection + def value!(timeout = nil) + internal_state.value if wait_until_resolved! timeout + end + + # Allows rejected Future to be risen with `raise` method. + # @example + # raise Promises.rejected_future(StandardError.new("boom")) + # @raise [StandardError] when raising not rejected future + # @return [Exception] + def exception(*args) + raise Concurrent::Error, 'it is not rejected' unless rejected? + reason = Array(internal_state.reason).flatten.compact + if reason.size > 1 + ex = Concurrent::MultipleErrors.new reason + ex.set_backtrace(caller) + ex + else + ex = reason[0].clone.exception(*args) + ex.set_backtrace Array(ex.backtrace) + caller + ex + end + end + + # @!macro promises.shortcut.on + # @return [Future] + def then(*args, &task) + then_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it fulfills. Does not run + # the task if it rejects. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [value, *args] to the task. + def then_on(executor, *args, &task) + ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.shortcut.on + # @return [Future] + def rescue(*args, &task) + rescue_on @DefaultExecutor, *args, &task + end + + # Chains the task to be executed asynchronously on executor after it rejects. Does not run + # the task if it fulfills. It will resolve though, triggering any dependent futures. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.task-future + # @return [Future] + # @yield [reason, *args] to the task. 
+ def rescue_on(executor, *args, &task) + RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future + end + + # @!macro promises.method.zip + # @return [Future] + def zip(other) + if other.is_a?(Future) + ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future + else + ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future + end + end + + alias_method :&, :zip + + # Creates a new event which will be resolved when the first of receiver, `event_or_future` + # resolves. Returning future will have value nil if event_or_future is event and resolves + # first. + # + # @return [Future] + def any(event_or_future) + AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future + end + + alias_method :|, :any + + # Creates new future dependent on receiver which will not evaluate until touched, see {#touch}. + # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. + # + # @return [Future] + def delay + event = DelayPromise.new(@DefaultExecutor).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end + + # @!macro promise.method.schedule + # @return [Future] + def schedule(intended_time) + chain do + event = ScheduledPromise.new(@DefaultExecutor, intended_time).event + ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future + end.flat + end + + # @!macro promises.method.with_default_executor + # @return [Future] + def with_default_executor(executor) + FutureWrapperPromise.new_blocked_by1(self, executor).future + end + + # Creates new future which will have result of the future returned by receiver. If receiver + # rejects it will have its rejection. 
+ # + # @param [Integer] level how many levels of futures should flatten + # @return [Future] + def flat_future(level = 1) + FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future + end + + alias_method :flat, :flat_future + + # Creates new event which will be resolved when the returned event by receiver is. + # Be careful if the receiver rejects it will just resolve since Event does not hold reason. + # + # @return [Event] + def flat_event + FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event + end + + # @!macro promises.shortcut.using + # @return [self] + def on_fulfillment(*args, &callback) + on_fulfillment_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment!(*args, &callback) + add_callback :callback_on_fulfillment, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # fulfilled. Does nothing on rejection. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [value, *args] to the callback. + def on_fulfillment_using(executor, *args, &callback) + add_callback :async_callback_on_fulfillment, executor, args, callback + end + + # @!macro promises.shortcut.using + # @return [self] + def on_rejection(*args, &callback) + on_rejection_using @DefaultExecutor, *args, &callback + end + + # Stores the callback to be executed synchronously on resolving thread after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. 
+ def on_rejection!(*args, &callback) + add_callback :callback_on_rejection, args, callback + end + + # Stores the callback to be executed asynchronously on executor after it is + # rejected. Does nothing on fulfillment. + # + # @!macro promises.param.executor + # @!macro promises.param.args + # @!macro promise.param.callback + # @return [self] + # @yield [reason, *args] to the callback. + def on_rejection_using(executor, *args, &callback) + add_callback :async_callback_on_rejection, executor, args, callback + end + + # Allows to use futures as green threads. The receiver has to evaluate to a future which + # represents what should be done next. It basically flattens indefinitely until non Future + # values is returned which becomes result of the returned future. Any encountered exception + # will become reason of the returned future. + # + # @return [Future] + # @example + # body = lambda do |v| + # v += 1 + # v < 5 ? Promises.future(v, &body) : v + # end + # Promises.future(0, &body).run.value! # => 5 + def run + RunFuturePromise.new_blocked_by1(self, @DefaultExecutor).future + end + + # @!visibility private + def apply(args, block) + internal_state.apply args, block + end + + # Converts future to event which is resolved when future is resolved by fulfillment or rejection. + # + # @return [Event] + def to_event + event = Promises.resolvable_event + ensure + chain_resolvable(event) + end + + # Returns self, since this is a future + # @return [Future] + def to_future + self + end + + private + + def rejected_resolution(raise_on_reassign, state) + if raise_on_reassign + raise Concurrent::MultipleAssignmentError.new( + "Future can be resolved only once. It's #{result}, trying to set #{state.result}.", + current_result: result, new_result: state.result) + end + return false + end + + def wait_until_resolved!(timeout = nil) + result = wait_until_resolved(timeout) + raise self if rejected? 
+ result + end + + def async_callback_on_fulfillment(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_fulfillment st, ar, cb + end + end + + def async_callback_on_rejection(state, executor, args, callback) + with_async(executor, state, args, callback) do |st, ar, cb| + callback_on_rejection st, ar, cb + end + end + + def callback_on_fulfillment(state, args, callback) + state.apply args, callback if state.fulfilled? + end + + def callback_on_rejection(state, args, callback) + state.apply args, callback unless state.fulfilled? + end + + def callback_on_resolution(state, args, callback) + callback.call(*state.result, *args) + end + + end + + # Marker module of Future, Event resolved manually by user. + module Resolvable + end + + # A Event which can be resolved by user. + class ResolvableEvent < Event + include Resolvable + + + # @!macro raise_on_reassign + # @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true. + + # @!macro promise.param.raise_on_reassign + # @param [Boolean] raise_on_reassign should method raise exception if already resolved + # @return [self, false] false is returner when raise_on_reassign is false and the receiver + # is already resolved. + # + + # Makes the event resolved, which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + def resolve(raise_on_reassign = true) + resolve_with RESOLVED, raise_on_reassign + end + + # Creates new event wrapping receiver, effectively hiding the resolve method. + # + # @return [Event] + def with_hidden_resolvable + @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event + end + end + + # A Future which can be resolved by user. + class ResolvableFuture < Future + include Resolvable + + # Makes the future resolved with result of triplet `fulfilled?`, `value`, `reason`, + # which triggers all dependent futures. 
+ # + # @!macro promise.param.raise_on_reassign + def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true) + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign) + end + + # Makes the future fulfilled with `value`, + # which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + def fulfill(value, raise_on_reassign = true) + promise.fulfill(value, raise_on_reassign) + end + + # Makes the future rejected with `reason`, + # which triggers all dependent futures. + # + # @!macro promise.param.raise_on_reassign + def reject(reason, raise_on_reassign = true) + promise.reject(reason, raise_on_reassign) + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + def evaluate_to(*args, &block) + promise.evaluate_to(*args, block) + end + + # Evaluates the block and sets its result as future's value fulfilling, if the block raises + # an exception the future rejects with it. + # @yield [*args] to the block. + # @yieldreturn [Object] value + # @return [self] + # @raise [Exception] also raise reason on rejection. + def evaluate_to!(*args, &block) + promise.evaluate_to(*args, block).wait! + end + + # Creates new future wrapping receiver, effectively hiding the resolve method and similar. + # + # @return [Future] + def with_hidden_resolvable + @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future + end + end + + # @abstract + # @private + class AbstractPromise < Synchronization::Object + safe_initialization! 
+ include InternalStates + + def initialize(future) + super() + @Future = future + end + + def future + @Future + end + + alias_method :event, :future + + def default_executor + future.default_executor + end + + def state + future.state + end + + def touch + end + + def to_s + format '%s %s>', super[0..-2], @Future + end + + alias_method :inspect, :to_s + + def delayed_because + nil + end + + private + + def resolve_with(new_state, raise_on_reassign = true) + @Future.resolve_with(new_state, raise_on_reassign) + end + + # @return [Future] + def evaluate_to(*args, block) + resolve_with Fulfilled.new(block.call(*args)) + rescue Exception => error + resolve_with Rejected.new(error) + raise error unless error.is_a?(StandardError) + end + end + + class ResolvableEventPromise < AbstractPromise + def initialize(default_executor) + super ResolvableEvent.new(self, default_executor) + end + end + + class ResolvableFuturePromise < AbstractPromise + def initialize(default_executor) + super ResolvableFuture.new(self, default_executor) + end + + def fulfill(value, raise_on_reassign) + resolve_with Fulfilled.new(value), raise_on_reassign + end + + def reject(reason, raise_on_reassign) + resolve_with Rejected.new(reason), raise_on_reassign + end + + public :evaluate_to + end + + # @abstract + class InnerPromise < AbstractPromise + end + + # @abstract + class BlockedPromise < InnerPromise + + private_class_method :new + + def self.new_blocked_by1(blocker, *args, &block) + blocker_delayed = blocker.promise.delayed_because + promise = new(blocker_delayed, 1, *args, &block) + blocker.add_callback_notify_blocked promise, 0 + promise + end + + def self.new_blocked_by2(blocker1, blocker2, *args, &block) + blocker_delayed1 = blocker1.promise.delayed_because + blocker_delayed2 = blocker2.promise.delayed_because + delayed = if blocker_delayed1 && blocker_delayed2 + # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) + 
LockFreeStack.of2(blocker_delayed1, blocker_delayed2) + else + blocker_delayed1 || blocker_delayed2 + end + promise = new(delayed, 2, *args, &block) + blocker1.add_callback_notify_blocked promise, 0 + blocker2.add_callback_notify_blocked promise, 1 + promise + end + + def self.new_blocked_by(blockers, *args, &block) + delayed = blockers.reduce(nil) { |d, f| add_delayed d, f.promise.delayed_because } + promise = new(delayed, blockers.size, *args, &block) + blockers.each_with_index { |f, i| f.add_callback_notify_blocked promise, i } + promise + end + + def self.add_delayed(delayed1, delayed2) + if delayed1 && delayed2 + delayed1.push delayed2 + delayed1 + else + delayed1 || delayed2 + end + end + + def initialize(delayed, blockers_count, future) + super(future) + @Delayed = delayed + @Countdown = AtomicFixnum.new blockers_count + end + + def on_blocker_resolution(future, index) + countdown = process_on_blocker_resolution(future, index) + resolvable = resolvable?(countdown, future, index) + + on_resolvable(future, index) if resolvable + end + + def delayed_because + @Delayed + end + + def touch + clear_and_propagate_touch + end + + # for inspection only + def blocked_by + blocked_by = [] + ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } + blocked_by + end + + private + + def clear_and_propagate_touch(stack_or_element = @Delayed) + return if stack_or_element.nil? + + if stack_or_element.is_a? LockFreeStack + stack_or_element.clear_each { |element| clear_and_propagate_touch element } + else + stack_or_element.touch unless stack_or_element.nil? # if still present + end + end + + # @return [true,false] if resolvable + def resolvable?(countdown, future, index) + countdown.zero? 
+ end + + def process_on_blocker_resolution(future, index) + @Countdown.decrement + end + + def on_resolvable(resolved_future, index) + raise NotImplementedError + end + end + + # @abstract + class BlockedTaskPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + raise ArgumentError, 'no block given' unless block_given? + super delayed, 1, Future.new(self, default_executor) + @Executor = executor + @Task = task + @Args = args + end + + def executor + @Executor + end + end + + class ThenPromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.fulfilled? + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class RescuePromise < BlockedTaskPromise + private + + def initialize(delayed, blockers_count, default_executor, executor, args, &task) + super delayed, blockers_count, default_executor, executor, args, &task + end + + def on_resolvable(resolved_future, index) + if resolved_future.rejected? 
+ Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to lambda { future.apply args, task } + end + else + resolve_with resolved_future.internal_state + end + end + end + + class ChainPromise < BlockedTaskPromise + private + + def on_resolvable(resolved_future, index) + if Future === resolved_future + Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| + evaluate_to(*future.result, *args, task) + end + else + Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| + evaluate_to(*args, task) + end + end + end + end + + # will be immediately resolved + class ImmediateEventPromise < InnerPromise + def initialize(default_executor) + super Event.new(self, default_executor).resolve_with(RESOLVED) + end + end + + class ImmediateFuturePromise < InnerPromise + def initialize(default_executor, fulfilled, value, reason) + super Future.new(self, default_executor). + resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) + end + end + + class AbstractFlatPromise < BlockedPromise + + def initialize(delayed_because, blockers_count, event_or_future) + delayed = LockFreeStack.of1(self) + super(delayed, blockers_count, event_or_future) + # noinspection RubyArgCount + @Touched = AtomicBoolean.new false + @DelayedBecause = delayed_because || LockFreeStack.new + + event_or_future.add_callback_clear_delayed_node delayed.peek + end + + def touch + if @Touched.make_true + clear_and_propagate_touch @DelayedBecause + end + end + + private + + def touched? + @Touched.value + end + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + + def resolvable?(countdown, future, index) + !@Future.internal_state.resolved? && super(countdown, future, index) + end + + def add_delayed_of(future) + delayed = future.promise.delayed_because + if touched? 
+ clear_and_propagate_touch delayed + else + BlockedPromise.add_delayed @DelayedBecause, delayed + clear_and_propagate_touch @DelayedBecause if touched? + end + end + + end + + class FlatEventPromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with RESOLVED + return countdown + end + + value = internal_state.value + case value + when Future, Event + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + else + resolve_with RESOLVED + end + end + countdown + end + + end + + class FlatFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, levels, default_executor) + raise ArgumentError, 'levels has to be higher than 0' if levels < 1 + # flat promise may result to a future having delayed futures, therefore we have to have empty stack + # to be able to add new delayed futures + super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + countdown = super(future, index) + if countdown.nonzero? + internal_state = future.internal_state + + unless internal_state.fulfilled? 
+ resolve_with internal_state + return countdown + end + + value = internal_state.value + case value + when Future + add_delayed_of value + value.add_callback_notify_blocked self, nil + countdown + when Event + evaluate_to(lambda { raise TypeError, 'cannot flatten to Event' }) + else + evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) + end + end + countdown + end + + end + + class RunFuturePromise < AbstractFlatPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + def process_on_blocker_resolution(future, index) + internal_state = future.internal_state + + unless internal_state.fulfilled? + resolve_with internal_state + return 0 + end + + value = internal_state.value + case value + when Future + add_delayed_of value + value.add_callback_notify_blocked self, nil + else + resolve_with internal_state + end + + 1 + end + end + + class ZipEventEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + class ZipFutureEventPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 2, Future.new(self, default_executor) + @result = nil + end + + private + + def process_on_blocker_resolution(future, index) + # first blocking is future, take its result + @result = future.internal_state if index == 0 + # super has to be called after above to piggyback on volatile @Countdown + super future, index + end + + def on_resolvable(resolved_future, index) + resolve_with @result + end + end + + class EventWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Event.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with 
RESOLVED + end + end + + class FutureWrapperPromise < BlockedPromise + def initialize(delayed, blockers_count, default_executor) + super delayed, 1, Future.new(self, default_executor) + end + + private + + def on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state + end + end + + class ZipFuturesPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super(delayed, blockers_count, Future.new(self, default_executor)) + @Resolutions = ::Array.new(blockers_count, nil) + + on_resolvable nil, nil if blockers_count == 0 + end + + def process_on_blocker_resolution(future, index) + # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? + @Resolutions[index] = future.internal_state # has to be set before countdown in super + super future, index + end + + def on_resolvable(resolved_future, index) + all_fulfilled = true + values = ::Array.new(@Resolutions.size) + reasons = ::Array.new(@Resolutions.size) + + @Resolutions.each_with_index do |internal_state, i| + fulfilled, values[i], reasons[i] = internal_state.result + all_fulfilled &&= fulfilled + end + + if all_fulfilled + resolve_with FulfilledArray.new(values) + else + resolve_with PartiallyRejected.new(values, reasons) + end + end + end + + class ZipEventsPromise < BlockedPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + + on_resolvable nil, nil if blockers_count == 0 + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED + end + end + + # @abstract + class AbstractAnyPromise < BlockedPromise + end + + class AnyResolvedFuturePromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Future.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def 
on_resolvable(resolved_future, index) + resolve_with resolved_future.internal_state, false + end + end + + class AnyResolvedEventPromise < AbstractAnyPromise + + private + + def initialize(delayed, blockers_count, default_executor) + super delayed, blockers_count, Event.new(self, default_executor) + end + + def resolvable?(countdown, future, index) + true + end + + def on_resolvable(resolved_future, index) + resolve_with RESOLVED, false + end + end + + class AnyFulfilledFuturePromise < AnyResolvedFuturePromise + + private + + def resolvable?(countdown, future, index) + future.fulfilled? || + # inlined super from BlockedPromise + countdown.zero? + end + end + + class DelayPromise < InnerPromise + + def initialize(default_executor) + event = Event.new(self, default_executor) + @Delayed = LockFreeStack.of1(self) + super event + event.add_callback_clear_delayed_node @Delayed.peek + end + + def touch + @Future.resolve_with RESOLVED + end + + def delayed_because + @Delayed + end + + end + + class ScheduledPromise < InnerPromise + def intended_time + @IntendedTime + end + + def inspect + "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" + end + + private + + def initialize(default_executor, intended_time) + super Event.new(self, default_executor) + + @IntendedTime = intended_time + + in_seconds = begin + now = Time.now + schedule_time = if @IntendedTime.is_a? 
Time + @IntendedTime + else + now + @IntendedTime + end + [0, schedule_time.to_f - now.to_f].max + end + + Concurrent.global_timer_set.post(in_seconds) do + @Future.resolve_with RESOLVED + end + end + end + + extend FactoryMethods + + private_constant :AbstractPromise, + :ResolvableEventPromise, + :ResolvableFuturePromise, + :InnerPromise, + :BlockedPromise, + :BlockedTaskPromise, + :ThenPromise, + :RescuePromise, + :ChainPromise, + :ImmediateEventPromise, + :ImmediateFuturePromise, + :AbstractFlatPromise, + :FlatFuturePromise, + :FlatEventPromise, + :RunFuturePromise, + :ZipEventEventPromise, + :ZipFutureEventPromise, + :EventWrapperPromise, + :FutureWrapperPromise, + :ZipFuturesPromise, + :ZipEventsPromise, + :AbstractAnyPromise, + :AnyResolvedFuturePromise, + :AnyFulfilledFuturePromise, + :AnyResolvedEventPromise, + :DelayPromise, + :ScheduledPromise + + + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/re_include.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/re_include.rb new file mode 100644 index 0000000000..516d58cae1 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/re_include.rb @@ -0,0 +1,58 @@ +module Concurrent + + # Methods form module A included to a module B, which is already included into class C, + # will not be visible in the C class. If this module is extended to B then A's methods + # are correctly made visible to C. + # + # @example + # module A + # def a + # :a + # end + # end + # + # module B1 + # end + # + # class C1 + # include B1 + # end + # + # module B2 + # extend Concurrent::ReInclude + # end + # + # class C2 + # include B2 + # end + # + # B1.send :include, A + # B2.send :include, A + # + # C1.new.respond_to? :a # => false + # C2.new.respond_to? 
:a # => true + module ReInclude + # @!visibility private + def included(base) + (@re_include_to_bases ||= []) << [:include, base] + super(base) + end + + # @!visibility private + def extended(base) + (@re_include_to_bases ||= []) << [:extend, base] + super(base) + end + + # @!visibility private + def include(*modules) + result = super(*modules) + modules.reverse.each do |module_being_included| + (@re_include_to_bases ||= []).each do |method, mod| + mod.send method, module_being_included + end + end + result + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/scheduled_task.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/scheduled_task.rb new file mode 100644 index 0000000000..90f78b00ce --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/scheduled_task.rb @@ -0,0 +1,318 @@ +require 'concurrent/constants' +require 'concurrent/errors' +require 'concurrent/configuration' +require 'concurrent/ivar' +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/utility/monotonic_time' + +require 'concurrent/options' + +module Concurrent + + # `ScheduledTask` is a close relative of `Concurrent::Future` but with one + # important difference: A `Future` is set to execute as soon as possible + # whereas a `ScheduledTask` is set to execute after a specified delay. This + # implementation is loosely based on Java's + # [ScheduledExecutorService](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html). + # It is a more feature-rich variant of {Concurrent.timer}. + # + # The *intended* schedule time of task execution is set on object construction + # with the `delay` argument. The delay is a numeric (floating point or integer) + # representing a number of seconds in the future. 
Any other value or a numeric + # equal to or less than zero will result in an exception. The *actual* schedule + # time of task execution is set when the `execute` method is called. + # + # The constructor can also be given zero or more processing options. Currently + # the only supported options are those recognized by the + # [Dereferenceable](Dereferenceable) module. + # + # The final constructor argument is a block representing the task to be performed. + # If no block is given an `ArgumentError` will be raised. + # + # **States** + # + # `ScheduledTask` mixes in the [Obligation](Obligation) module thus giving it + # "future" behavior. This includes the expected lifecycle states. `ScheduledTask` + # has one additional state, however. While the task (block) is being executed the + # state of the object will be `:processing`. This additional state is necessary + # because it has implications for task cancellation. + # + # **Cancellation** + # + # A `:pending` task can be cancelled using the `#cancel` method. A task in any + # other state, including `:processing`, cannot be cancelled. The `#cancel` + # method returns a boolean indicating the success of the cancellation attempt. + # A cancelled `ScheduledTask` cannot be restarted. It is immutable. + # + # **Obligation and Observation** + # + # The result of a `ScheduledTask` can be obtained either synchronously or + # asynchronously. `ScheduledTask` mixes in both the [Obligation](Obligation) + # module and the + # [Observable](http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html) + # module from the Ruby standard library. With one exception `ScheduledTask` + # behaves identically to [Future](Observable) with regard to these modules. 
+ # + # @!macro copy_options + # + # @example Basic usage + # + # require 'concurrent' + # require 'thread' # for Queue + # require 'open-uri' # for open(uri) + # + # class Ticker + # def get_year_end_closing(symbol, year) + # uri = "http://ichart.finance.yahoo.com/table.csv?s=#{symbol}&a=11&b=01&c=#{year}&d=11&e=31&f=#{year}&g=m" + # data = open(uri) {|f| f.collect{|line| line.strip } } + # data[1].split(',')[4].to_f + # end + # end + # + # # Future + # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013) } + # price.state #=> :pending + # sleep(1) # do other stuff + # price.value #=> 63.65 + # price.state #=> :fulfilled + # + # # ScheduledTask + # task = Concurrent::ScheduledTask.execute(2){ Ticker.new.get_year_end_closing('INTC', 2013) } + # task.state #=> :pending + # sleep(3) # do other stuff + # task.value #=> 25.96 + # + # @example Successful task execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.state #=> :unscheduled + # task.execute + # task.state #=> pending + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> true + # task.rejected? #=> false + # task.value #=> 'What does the fox say?' + # + # @example One line creation and execution + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }.execute + # task.state #=> pending + # + # task = Concurrent::ScheduledTask.execute(2){ 'What do you get when you multiply 6 by 9?' } + # task.state #=> pending + # + # @example Failed task execution + # + # task = Concurrent::ScheduledTask.execute(2){ raise StandardError.new('Call me maybe?') } + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # task.unscheduled? #=> false + # task.pending? #=> false + # task.fulfilled? #=> false + # task.rejected? 
#=> true + # task.value #=> nil + # task.reason #=> # + # + # @example Task execution with observation + # + # observer = Class.new{ + # def update(time, value, reason) + # puts "The task completed at #{time} with value '#{value}'" + # end + # }.new + # + # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } + # task.add_observer(observer) + # task.execute + # task.pending? #=> true + # + # # wait for it... + # sleep(3) + # + # #>> The task completed at 2013-11-07 12:26:09 -0500 with value 'What does the fox say?' + # + # @!macro monotonic_clock_warning + # + # @see Concurrent.timer + class ScheduledTask < IVar + include Comparable + + # The executor on which to execute the task. + # @!visibility private + attr_reader :executor + + # Schedule a task for execution at a specified future time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @yield the task to be performed + # + # @!macro executor_and_deref_options + # + # @option opts [object, Array] :args zero or more arguments to be passed the task + # block on execution + # + # @raise [ArgumentError] When no block is given + # @raise [ArgumentError] When given a time that is in the past + def initialize(delay, opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + raise ArgumentError.new('seconds must be greater than zero') if delay.to_f < 0.0 + + super(NULL, opts, &nil) + + synchronize do + ns_set_state(:unscheduled) + @parent = opts.fetch(:timer_set, Concurrent.global_timer_set) + @args = get_arguments_from(opts) + @delay = delay.to_f + @task = task + @time = nil + @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor + self.observers = Collection::CopyOnNotifyObserverSet.new + end + end + + # The `delay` value given at instanciation. + # + # @return [Float] the initial delay. 
+ def initial_delay + synchronize { @delay } + end + + # The monotonic time at which the the task is scheduled to be executed. + # + # @return [Float] the schedule time or nil if `unscheduled` + def schedule_time + synchronize { @time } + end + + # Comparator which orders by schedule time. + # + # @!visibility private + def <=>(other) + schedule_time <=> other.schedule_time + end + + # Has the task been cancelled? + # + # @return [Boolean] true if the task is in the given state else false + def cancelled? + synchronize { ns_check_state?(:cancelled) } + end + + # In the task execution in progress? + # + # @return [Boolean] true if the task is in the given state else false + def processing? + synchronize { ns_check_state?(:processing) } + end + + # Cancel this task and prevent it from executing. A task can only be + # cancelled if it is pending or unscheduled. + # + # @return [Boolean] true if successfully cancelled else false + def cancel + if compare_and_set_state(:cancelled, :pending, :unscheduled) + complete(false, nil, CancelledOperationError.new) + # To avoid deadlocks this call must occur outside of #synchronize + # Changing the state above should prevent redundant calls + @parent.send(:remove_task, self) + else + false + end + end + + # Reschedule the task using the original delay and the current time. + # A task can only be reset while it is `:pending`. + # + # @return [Boolean] true if successfully rescheduled else false + def reset + synchronize{ ns_reschedule(@delay) } + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. 
+ # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @raise [ArgumentError] When given a time that is in the past + def reschedule(delay) + delay = delay.to_f + raise ArgumentError.new('seconds must be greater than zero') if delay < 0.0 + synchronize{ ns_reschedule(delay) } + end + + # Execute an `:unscheduled` `ScheduledTask`. Immediately sets the state to `:pending` + # and starts counting down toward execution. Does nothing if the `ScheduledTask` is + # in any state other than `:unscheduled`. + # + # @return [ScheduledTask] a reference to `self` + def execute + if compare_and_set_state(:pending, :unscheduled) + synchronize{ ns_schedule(@delay) } + end + self + end + + # Create a new `ScheduledTask` object with the given block, execute it, and return the + # `:pending` object. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @!macro executor_and_deref_options + # + # @return [ScheduledTask] the newly created `ScheduledTask` in the `:pending` state + # + # @raise [ArgumentError] if no block is given + def self.execute(delay, opts = {}, &task) + new(delay, opts, &task).execute + end + + # Execute the task. + # + # @!visibility private + def process_task + safe_execute(@task, @args) + end + + protected :set, :try_set, :fail, :complete + + protected + + # Schedule the task using the given delay and the current time. + # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_schedule(delay) + @delay = delay + @time = Concurrent.monotonic_time + @delay + @parent.send(:post_task, self) + end + + # Reschedule the task using the given delay and the current time. + # A task can only be reset while it is `:pending`. 
+ # + # @param [Float] delay the number of seconds to wait for before executing the task + # + # @return [Boolean] true if successfully rescheduled else false + # + # @!visibility private + def ns_reschedule(delay) + return false unless ns_check_state?(:pending) + @parent.send(:remove_task, self) && ns_schedule(delay) + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/set.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/set.rb new file mode 100644 index 0000000000..04dc936037 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/set.rb @@ -0,0 +1,66 @@ +require 'concurrent/utility/engine' +require 'concurrent/thread_safe/util' +require 'set' + +module Concurrent + + # @!macro concurrent_set + # + # A thread-safe subclass of Set. This version locks against the object + # itself for every method call, ensuring only one thread can be reading + # or writing at a time. This includes iteration methods like `#each`. + # + # @note `a += b` is **not** a **thread-safe** operation on + # `Concurrent::Set`. It reads Set `a`, then it creates new `Concurrent::Set` + # which is union of `a` and `b`, then it writes the union to `a`. + # The read and write are independent operations they do not form a single atomic + # operation therefore when two `+=` operations are executed concurrently updates + # may be lost. Use `#merge` instead. + # + # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set` + + + # @!macro internal_implementation_note + SetImplementation = case + when Concurrent.on_cruby? + # Because MRI never runs code in parallel, the existing + # non-thread-safe structures should usually work fine. + ::Set + + when Concurrent.on_jruby? 
+ require 'jruby/synchronized' + + class JRubySet < ::Set + include JRuby::Synchronized + end + JRubySet + + when Concurrent.on_rbx? + require 'monitor' + require 'concurrent/thread_safe/util/data_structures' + + class RbxSet < ::Set + end + ThreadSafe::Util.make_synchronized_on_rbx Concurrent::RbxSet + RbxSet + + when Concurrent.on_truffleruby? + require 'concurrent/thread_safe/util/data_structures' + + class TruffleRubySet < ::Set + end + + ThreadSafe::Util.make_synchronized_on_truffleruby Concurrent::TruffleRubySet + TruffleRubySet + + else + warn 'Possibly unsupported Ruby implementation' + ::Set + end + private_constant :SetImplementation + + # @!macro concurrent_set + class Set < SetImplementation + end +end + diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/settable_struct.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/settable_struct.rb new file mode 100644 index 0000000000..9706cff2da --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/settable_struct.rb @@ -0,0 +1,129 @@ +require 'concurrent/synchronization/abstract_struct' +require 'concurrent/errors' +require 'concurrent/synchronization' + +module Concurrent + + # An thread-safe, write-once variation of Ruby's standard `Struct`. + # Each member can have its value set at most once, either at construction + # or any time thereafter. Attempting to assign a value to a member + # that has already been set will result in a `Concurrent::ImmutabilityError`. 
+ # + # @see http://ruby-doc.org/core-2.2.0/Struct.html Ruby standard library `Struct` + # @see http://en.wikipedia.org/wiki/Final_(Java) Java `final` keyword + module SettableStruct + include Synchronization::AbstractStruct + + # @!macro struct_values + def values + synchronize { ns_values } + end + alias_method :to_a, :values + + # @!macro struct_values_at + def values_at(*indexes) + synchronize { ns_values_at(indexes) } + end + + # @!macro struct_inspect + def inspect + synchronize { ns_inspect } + end + alias_method :to_s, :inspect + + # @!macro struct_merge + def merge(other, &block) + synchronize { ns_merge(other, &block) } + end + + # @!macro struct_to_h + def to_h + synchronize { ns_to_h } + end + + # @!macro struct_get + def [](member) + synchronize { ns_get(member) } + end + + # @!macro struct_equality + def ==(other) + synchronize { ns_equality(other) } + end + + # @!macro struct_each + def each(&block) + return enum_for(:each) unless block_given? + synchronize { ns_each(&block) } + end + + # @!macro struct_each_pair + def each_pair(&block) + return enum_for(:each_pair) unless block_given? + synchronize { ns_each_pair(&block) } + end + + # @!macro struct_select + def select(&block) + return enum_for(:select) unless block_given? + synchronize { ns_select(&block) } + end + + # @!macro struct_set + # + # @raise [Concurrent::ImmutabilityError] if the given member has already been set + def []=(member, value) + if member.is_a? Integer + length = synchronize { @values.length } + if member >= length + raise IndexError.new("offset #{member} too large for struct(size:#{length})") + end + synchronize do + unless @values[member].nil? 
+ raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[member] = value + end + else + send("#{member}=", value) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + # @!macro struct_new + def self.new(*args, &block) + clazz_name = nil + if args.length == 0 + raise ArgumentError.new('wrong number of arguments (0 for 1+)') + elsif args.length > 0 && args.first.is_a?(String) + clazz_name = args.shift + end + FACTORY.define_struct(clazz_name, args, &block) + end + + FACTORY = Class.new(Synchronization::LockableObject) do + def define_struct(name, members, &block) + synchronize do + clazz = Synchronization::AbstractStruct.define_struct_class(SettableStruct, Synchronization::LockableObject, name, members, &block) + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + synchronize { @values[index] } + end + clazz.send(:define_method, "#{member}=") do |value| + synchronize do + unless @values[index].nil? 
+ raise Concurrent::ImmutabilityError.new('struct member has already been set') + end + @values[index] = value + end + end + end + clazz + end + end + end.new + private_constant :FACTORY + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization.rb similarity index 75% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization.rb index 62575baa55..49c68ebbb3 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization.rb +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization.rb @@ -7,15 +7,14 @@ Concurrent.load_native_extensions require 'concurrent/synchronization/mri_object' require 'concurrent/synchronization/jruby_object' require 'concurrent/synchronization/rbx_object' -require 'concurrent/synchronization/truffle_object' +require 'concurrent/synchronization/truffleruby_object' require 'concurrent/synchronization/object' require 'concurrent/synchronization/volatile' require 'concurrent/synchronization/abstract_lockable_object' -require 'concurrent/synchronization/mri_lockable_object' +require 'concurrent/synchronization/mutex_lockable_object' require 'concurrent/synchronization/jruby_lockable_object' require 'concurrent/synchronization/rbx_lockable_object' -require 'concurrent/synchronization/truffle_lockable_object' require 'concurrent/synchronization/lockable_object' @@ -23,8 +22,8 @@ require 'concurrent/synchronization/condition' require 'concurrent/synchronization/lock' module Concurrent - # {include:file:doc/synchronization.md} - # 
{include:file:doc/synchronization-notes.md} + # {include:file:docs-source/synchronization.md} + # {include:file:docs-source/synchronization-notes.md} module Synchronization end end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/abstract_lockable_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_lockable_object.rb similarity index 89% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/abstract_lockable_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_lockable_object.rb index be7befc8fa..bc12603364 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/abstract_lockable_object.rb +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_lockable_object.rb @@ -6,7 +6,7 @@ module Concurrent protected - # @!macro [attach] synchronization_object_method_synchronize + # @!macro synchronization_object_method_synchronize # # @yield runs the block synchronized against this object, # equivalent of java's `synchronize(this) {}` @@ -15,7 +15,7 @@ module Concurrent raise NotImplementedError end - # @!macro [attach] synchronization_object_method_ns_wait_until + # @!macro synchronization_object_method_ns_wait_until # # Wait until condition is met or timeout passes, # protects against spurious wake-ups. @@ -45,7 +45,7 @@ module Concurrent end end - # @!macro [attach] synchronization_object_method_ns_wait + # @!macro synchronization_object_method_ns_wait # # Wait until another thread calls #signal or #broadcast, # spurious wake-ups can happen. 
@@ -63,7 +63,7 @@ module Concurrent raise NotImplementedError end - # @!macro [attach] synchronization_object_method_ns_signal + # @!macro synchronization_object_method_ns_signal # # Signal one waiting thread. # @return [self] @@ -78,7 +78,7 @@ module Concurrent raise NotImplementedError end - # @!macro [attach] synchronization_object_method_ns_broadcast + # @!macro synchronization_object_method_ns_broadcast # # Broadcast to all waiting threads. # @return [self] diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/abstract_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_object.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/abstract_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_object.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_struct.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_struct.rb new file mode 100644 index 0000000000..7005428b37 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/abstract_struct.rb @@ -0,0 +1,159 @@ +module Concurrent + module Synchronization + + # @!visibility private + # @!macro internal_implementation_note + module AbstractStruct + + # @!visibility private + def initialize(*values) + super() + ns_initialize(*values) + end + + # @!macro struct_length + # + # Returns the number of struct members. 
+ # + # @return [Fixnum] the number of struct members + def length + self.class::MEMBERS.length + end + alias_method :size, :length + + # @!macro struct_members + # + # Returns the struct members as an array of symbols. + # + # @return [Array] the struct members as an array of symbols + def members + self.class::MEMBERS.dup + end + + protected + + # @!macro struct_values + # + # @!visibility private + def ns_values + @values.dup + end + + # @!macro struct_values_at + # + # @!visibility private + def ns_values_at(indexes) + @values.values_at(*indexes) + end + + # @!macro struct_to_h + # + # @!visibility private + def ns_to_h + length.times.reduce({}){|memo, i| memo[self.class::MEMBERS[i]] = @values[i]; memo} + end + + # @!macro struct_get + # + # @!visibility private + def ns_get(member) + if member.is_a? Integer + if member >= @values.length + raise IndexError.new("offset #{member} too large for struct(size:#{@values.length})") + end + @values[member] + else + send(member) + end + rescue NoMethodError + raise NameError.new("no member '#{member}' in struct") + end + + # @!macro struct_equality + # + # @!visibility private + def ns_equality(other) + self.class == other.class && self.values == other.values + end + + # @!macro struct_each + # + # @!visibility private + def ns_each + values.each{|value| yield value } + end + + # @!macro struct_each_pair + # + # @!visibility private + def ns_each_pair + @values.length.times do |index| + yield self.class::MEMBERS[index], @values[index] + end + end + + # @!macro struct_select + # + # @!visibility private + def ns_select + values.select{|value| yield value } + end + + # @!macro struct_inspect + # + # @!visibility private + def ns_inspect + struct = pr_underscore(self.class.ancestors[1]) + clazz = ((self.class.to_s =~ /^#" + end + + # @!macro struct_merge + # + # @!visibility private + def ns_merge(other, &block) + self.class.new(*self.to_h.merge(other, &block).values) + end + + # @!visibility private + def 
pr_underscore(clazz) + word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229 + word.gsub!(/::/, '/') + word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2') + word.gsub!(/([a-z\d])([A-Z])/,'\1_\2') + word.tr!("-", "_") + word.downcase! + word + end + + # @!visibility private + def self.define_struct_class(parent, base, name, members, &block) + clazz = Class.new(base || Object) do + include parent + self.const_set(:MEMBERS, members.collect{|member| member.to_s.to_sym}.freeze) + def ns_initialize(*values) + raise ArgumentError.new('struct size differs') if values.length > length + @values = values.fill(nil, values.length..length-1) + end + end + unless name.nil? + begin + parent.send :remove_const, name if parent.const_defined? name + parent.const_set(name, clazz) + clazz + rescue NameError + raise NameError.new("identifier #{name} needs to be constant") + end + end + members.each_with_index do |member, index| + clazz.send :remove_method, member if clazz.instance_methods.include? member + clazz.send(:define_method, member) do + @values[index] + end + end + clazz.class_exec(&block) unless block.nil? 
+ clazz + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/condition.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/condition.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/condition.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/condition.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/jruby_lockable_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/jruby_lockable_object.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/jruby_lockable_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/jruby_lockable_object.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/jruby_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/jruby_object.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/jruby_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/jruby_object.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/lock.rb 
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/lock.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/lock.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/lock.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/lockable_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/lockable_object.rb similarity index 87% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/lockable_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/lockable_object.rb index a28d5af004..34cc8d974a 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/lockable_object.rb +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/lockable_object.rb @@ -5,18 +5,18 @@ module Concurrent # @!macro internal_implementation_note LockableObjectImplementation = case when Concurrent.on_cruby? && Concurrent.ruby_version(:<=, 1, 9, 3) - MriMonitorLockableObject + MonitorLockableObject when Concurrent.on_cruby? && Concurrent.ruby_version(:>, 1, 9, 3) - MriMutexLockableObject + MutexLockableObject when Concurrent.on_jruby? JRubyLockableObject when Concurrent.on_rbx? RbxLockableObject - when Concurrent.on_truffle? - MriMutexLockableObject + when Concurrent.on_truffleruby? 
+ MutexLockableObject else warn 'Possibly unsupported Ruby implementation' - MriMonitorLockableObject + MonitorLockableObject end private_constant :LockableObjectImplementation @@ -31,7 +31,7 @@ module Concurrent # `Thread#sleep` and `Thread#wakeup` will work as expected but mixing `Synchronization::Object#wait` and # `Thread#wakeup` will not work on all platforms. # - # @see {Event} implementation as an example of this class use + # @see Event implementation as an example of this class use # # @example simple # class AnClass < Synchronization::Object diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/mri_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/mri_object.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/mri_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/mri_object.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/mri_lockable_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/mutex_lockable_object.rb similarity index 56% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/mri_lockable_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/mutex_lockable_object.rb index 22120280b9..f288c51a1f 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/mri_lockable_object.rb +++ 
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/mutex_lockable_object.rb @@ -1,18 +1,19 @@ module Concurrent + # noinspection RubyInstanceVariableNamingConvention module Synchronization # @!visibility private # @!macro internal_implementation_note - class MriLockableObject < AbstractLockableObject + module ConditionSignalling protected def ns_signal - @__condition__.signal + @__Condition__.signal self end def ns_broadcast - @__condition__.broadcast + @__Condition__.broadcast self end end @@ -20,50 +21,54 @@ module Concurrent # @!visibility private # @!macro internal_implementation_note - class MriMutexLockableObject < MriLockableObject + class MutexLockableObject < AbstractLockableObject + include ConditionSignalling + safe_initialization! def initialize(*defaults) super(*defaults) - @__lock__ = ::Mutex.new - @__condition__ = ::ConditionVariable.new + @__Lock__ = ::Mutex.new + @__Condition__ = ::ConditionVariable.new end protected def synchronize - if @__lock__.owned? + if @__Lock__.owned? yield else - @__lock__.synchronize { yield } + @__Lock__.synchronize { yield } end end def ns_wait(timeout = nil) - @__condition__.wait @__lock__, timeout + @__Condition__.wait @__Lock__, timeout self end end # @!visibility private # @!macro internal_implementation_note - class MriMonitorLockableObject < MriLockableObject + class MonitorLockableObject < AbstractLockableObject + include ConditionSignalling + safe_initialization! 
def initialize(*defaults) super(*defaults) - @__lock__ = ::Monitor.new - @__condition__ = @__lock__.new_cond + @__Lock__ = ::Monitor.new + @__Condition__ = @__Lock__.new_cond end protected def synchronize # TODO may be a problem with lock.synchronize { lock.wait } - @__lock__.synchronize { yield } + @__Lock__.synchronize { yield } end def ns_wait(timeout = nil) - @__condition__.wait timeout + @__Condition__.wait timeout self end end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/object.rb similarity index 93% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/object.rb index 21b5695df2..1254427c70 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/object.rb +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/object.rb @@ -10,9 +10,10 @@ module Concurrent JRubyObject when Concurrent.on_rbx? RbxObject - when Concurrent.on_truffle? - TruffleObject + when Concurrent.on_truffleruby? 
+ TruffleRubyObject else + warn 'Possibly unsupported Ruby implementation' MriObject end private_constant :ObjectImplementation @@ -134,8 +135,11 @@ module Concurrent private def self.define_initialize_volatile_with_cas - assignments = @volatile_cas_fields.map { |name| "@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }} = AtomicReference.new(nil)" }.join("\n") - class_eval <<-RUBY + assignments = @volatile_cas_fields.map do |name| + "@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }} = Concurrent::AtomicReference.new(nil)" + end.join("\n") + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 def initialize_volatile_with_cas super #{assignments} diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/rbx_lockable_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/rbx_lockable_object.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/rbx_lockable_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/rbx_lockable_object.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/rbx_object.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/rbx_object.rb similarity index 100% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/rbx_object.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/rbx_object.rb diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/truffleruby_object.rb 
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/truffleruby_object.rb new file mode 100644 index 0000000000..b25fe2189b --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/truffleruby_object.rb @@ -0,0 +1,46 @@ +module Concurrent + module Synchronization + + module TruffleRubyAttrVolatile + def self.included(base) + base.extend(ClassMethods) + end + + module ClassMethods + def attr_volatile(*names) + names.each do |name| + ivar = :"@volatile_#{name}" + + class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{name} + full_memory_barrier + #{ivar} + end + + def #{name}=(value) + #{ivar} = value + full_memory_barrier + end + RUBY + end + + names.map { |n| [n, :"#{n}="] }.flatten + end + end + + def full_memory_barrier + TruffleRuby.full_memory_barrier + end + end + + # @!visibility private + # @!macro internal_implementation_note + class TruffleRubyObject < AbstractObject + include TruffleRubyAttrVolatile + + def initialize + # nothing to do + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/volatile.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/volatile.rb similarity index 53% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/volatile.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/volatile.rb index 45269dff90..9dffa914ae 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/synchronization/volatile.rb +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/synchronization/volatile.rb @@ -21,14 +21,16 @@ module Concurrent # 
=> 2 Volatile = case - when Concurrent.on_cruby? - MriAttrVolatile - when Concurrent.on_jruby? - JRubyAttrVolatile - when Concurrent.on_rbx? || Concurrent.on_truffle? - RbxAttrVolatile - else - MriAttrVolatile - end + when Concurrent.on_cruby? + MriAttrVolatile + when Concurrent.on_jruby? + JRubyAttrVolatile + when Concurrent.on_rbx? + RbxAttrVolatile + when Concurrent.on_truffleruby? + TruffleRubyAttrVolatile + else + MriAttrVolatile + end end end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/synchronized_delegator.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/synchronized_delegator.rb new file mode 100644 index 0000000000..92e7c45fc5 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/synchronized_delegator.rb @@ -0,0 +1,50 @@ +require 'delegate' +require 'monitor' + +module Concurrent + unless defined?(SynchronizedDelegator) + + # This class provides a trivial way to synchronize all calls to a given object + # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls + # around the delegated `#send`. Example: + # + # array = [] # not thread-safe on many impls + # array = SynchronizedDelegator.new([]) # thread-safe + # + # A simple `Monitor` provides a very coarse-grained way to synchronize a given + # object, in that it will cause synchronization for methods that have no need + # for it, but this is a trivial way to get thread-safety where none may exist + # currently on some implementations. 
+ # + # This class is currently being considered for inclusion into stdlib, via + # https://bugs.ruby-lang.org/issues/8556 + # + # @!visibility private + class SynchronizedDelegator < SimpleDelegator + def setup + @old_abort = Thread.abort_on_exception + Thread.abort_on_exception = true + end + + def teardown + Thread.abort_on_exception = @old_abort + end + + def initialize(obj) + __setobj__(obj) + @monitor = Monitor.new + end + + def method_missing(method, *args, &block) + monitor = @monitor + begin + monitor.enter + super + ensure + monitor.exit + end + end + + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util.rb new file mode 100644 index 0000000000..c67084a26f --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util.rb @@ -0,0 +1,16 @@ +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::NativeInteger + FIXNUM_BIT_SIZE = (0.size * 8) - 2 + MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 + # TODO (pitr-ch 15-Oct-2016): migrate to Utility::ProcessorCounter + CPU_COUNT = 16 # is there a way to determine this? 
+ end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/adder.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/adder.rb new file mode 100644 index 0000000000..7a6e8d5c0e --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/adder.rb @@ -0,0 +1,74 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/striped64' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 + # + # One or more variables that together maintain an initially zero + # sum. When updates (method +add+) are contended across threads, + # the set of variables may grow dynamically to reduce contention. + # Method +sum+ returns the current total combined across the + # variables maintaining the sum. + # + # This class is usually preferable to single +Atomic+ reference when + # multiple threads update a common sum that is used for purposes such + # as collecting statistics, not for fine-grained synchronization + # control. Under low update contention, the two classes have similar + # characteristics. But under high contention, expected throughput of + # this class is significantly higher, at the expense of higher space + # consumption. + # + # @!visibility private + class Adder < Striped64 + # Adds the given value. 
+ def add(x) + if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} + was_uncontended = true + hash = hash_code + unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) + retry_update(x, hash, was_uncontended) {|current_value| current_value + x} + end + end + end + + def increment + add(1) + end + + def decrement + add(-1) + end + + # Returns the current sum. The returned value is _NOT_ an + # atomic snapshot: Invocation in the absence of concurrent + # updates returns an accurate result, but concurrent updates that + # occur while the sum is being calculated might not be + # incorporated. + def sum + x = base + if current_cells = cells + current_cells.each do |cell| + x += cell.value if cell + end + end + x + end + + def reset + internal_reset(0) + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/cheap_lockable.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/cheap_lockable.rb new file mode 100644 index 0000000000..d9b4c58186 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/cheap_lockable.rb @@ -0,0 +1,118 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/volatile' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ + # with the +ConditionVariable+ bundled in. + # + # Usage: + # class A + # include CheapLockable + # + # def do_exlusively + # cheap_synchronize { yield } + # end + # + # def wait_for_something + # cheap_synchronize do + # cheap_wait until resource_available? 
+ # do_something + # cheap_broadcast # wake up others + # end + # end + # end + # + # @!visibility private + module CheapLockable + private + engine = defined?(RUBY_ENGINE) && RUBY_ENGINE + if engine == 'rbx' + # Making use of the Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects. + def cheap_synchronize + Rubinius.lock(self) + begin + yield + ensure + Rubinius.unlock(self) + end + end + + def cheap_wait + wchan = Rubinius::Channel.new + + begin + waiters = @waiters ||= [] + waiters.push wchan + Rubinius.unlock(self) + signaled = wchan.receive_timeout nil + ensure + Rubinius.lock(self) + + unless signaled or waiters.delete(wchan) + # we timed out, but got signaled afterwards (e.g. while waiting to + # acquire @lock), so pass that signal on to the next waiter + waiters.shift << true unless waiters.empty? + end + end + + self + end + + def cheap_broadcast + waiters = @waiters ||= [] + waiters.shift << true until waiters.empty? + self + end + elsif engine == 'jruby' + # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects + require 'jruby' + + def cheap_synchronize + JRuby.reference0(self).synchronized { yield } + end + + def cheap_wait + JRuby.reference0(self).wait + end + + def cheap_broadcast + JRuby.reference0(self).notify_all + end + else + require 'thread' + + extend Volatile + attr_volatile :mutex + + # Non-reentrant Mutex#syncrhonize + def cheap_synchronize + true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) + my_mutex.synchronize { yield } + end + + # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. + # Must only be called in +cheap_broadcast+'s block. 
+ def cheap_wait + conditional_variable = @conditional_variable ||= ConditionVariable.new + conditional_variable.wait(mutex) + end + + # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. + # Must only be called in +cheap_broadcast+'s block. + def cheap_broadcast + if conditional_variable = @conditional_variable + conditional_variable.broadcast + end + end + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/data_structures.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/data_structures.rb new file mode 100644 index 0000000000..ff1e8ed97e --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/data_structures.rb @@ -0,0 +1,63 @@ +require 'concurrent/thread_safe/util' + +# Shim for TruffleRuby.synchronized +if Concurrent.on_truffleruby? && !TruffleRuby.respond_to?(:synchronized) + module TruffleRuby + def self.synchronized(object, &block) + Truffle::System.synchronized(object, &block) + end + end +end + +module Concurrent + module ThreadSafe + module Util + def self.make_synchronized_on_rbx(klass) + klass.class_eval do + private + + def _mon_initialize + @_monitor = Monitor.new unless @_monitor # avoid double initialisation + end + + def self.new(*args) + obj = super(*args) + obj.send(:_mon_initialize) + obj + end + end + + klass.superclass.instance_methods(false).each do |method| + case method + when :new_range, :new_reserved + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + obj = super + obj.send(:_mon_initialize) + obj + end + RUBY + else + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args) + monitor = @_monitor + monitor or raise("BUG: Internal monitor was not properly initialized. 
Please report this to the concurrent-ruby developers.") + monitor.synchronize { super } + end + RUBY + end + end + end + + def self.make_synchronized_on_truffleruby(klass) + klass.superclass.instance_methods(false).each do |method| + klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 + def #{method}(*args, &block) + TruffleRuby.synchronized(self) { super(*args, &block) } + end + RUBY + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/power_of_two_tuple.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/power_of_two_tuple.rb new file mode 100644 index 0000000000..b54be39c4c --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/power_of_two_tuple.rb @@ -0,0 +1,38 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/tuple' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + class PowerOfTwoTuple < Concurrent::Tuple + + def initialize(size) + raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 + super(size) + end + + def hash_to_index(hash) + (size - 1) & hash + end + + def volatile_get_by_hash(hash) + volatile_get(hash_to_index(hash)) + end + + def volatile_set_by_hash(hash, value) + volatile_set(hash_to_index(hash), value) + end + + def next_in_size_table + self.class.new(size << 1) + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/striped64.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/striped64.rb new file mode 100644 index 0000000000..5530e977c9 --- /dev/null +++ 
b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/striped64.rb @@ -0,0 +1,246 @@ +require 'concurrent/thread_safe/util' +require 'concurrent/thread_safe/util/power_of_two_tuple' +require 'concurrent/thread_safe/util/volatile' +require 'concurrent/thread_safe/util/xor_shift_random' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 + # available in public domain. + # + # Original source code available here: + # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 + # + # Class holding common representation and mechanics for classes supporting + # dynamic striping on 64bit values. + # + # This class maintains a lazily-initialized table of atomically updated + # variables, plus an extra +base+ field. The table size is a power of two. + # Indexing uses masked per-thread hash codes. Nearly all methods on this + # class are private, accessed directly by subclasses. + # + # Table entries are of class +Cell+; a variant of AtomicLong padded to + # reduce cache contention on most processors. Padding is overkill for most + # Atomics because they are usually irregularly scattered in memory and thus + # don't interfere much with each other. But Atomic objects residing in + # arrays will tend to be placed adjacent to each other, and so will most + # often share cache lines (with a huge negative performance impact) without + # this precaution. + # + # In part because +Cell+s are relatively large, we avoid creating them until + # they are needed. When there is no contention, all updates are made to the + # +base+ field. Upon first contention (a failed CAS on +base+ update), the + # table is initialized to size 2. 
The table size is doubled upon further + # contention until reaching the nearest power of two greater than or equal + # to the number of CPUS. Table slots remain empty (+nil+) until they are + # needed. + # + # A single spinlock (+busy+) is used for initializing and resizing the + # table, as well as populating slots with new +Cell+s. There is no need for + # a blocking lock: When the lock is not available, threads try other slots + # (or the base). During these retries, there is increased contention and + # reduced locality, which is still better than alternatives. + # + # Per-thread hash codes are initialized to random values. Contention and/or + # table collisions are indicated by failed CASes when performing an update + # operation (see method +retry_update+). Upon a collision, if the table size + # is less than the capacity, it is doubled in size unless some other thread + # holds the lock. If a hashed slot is empty, and lock is available, a new + # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries + # proceed by "double hashing", using a secondary hash (XorShift) to try to + # find a free slot. + # + # The table size is capped because, when there are more threads than CPUs, + # supposing that each thread were bound to a CPU, there would exist a + # perfect hash function mapping threads to slots that eliminates collisions. + # When we reach capacity, we search for this mapping by randomly varying the + # hash codes of colliding threads. Because search is random, and collisions + # only become known via CAS failures, convergence can be slow, and because + # threads are typically not bound to CPUS forever, may not occur at all. + # However, despite these limitations, observed contention rates are + # typically low in these cases. + # + # It is possible for a +Cell+ to become unused when threads that once hashed + # to it terminate, as well as in the case where doubling the table causes no + # thread to hash to it under expanded mask. 
We do not try to detect or + # remove such cells, under the assumption that for long-running instances, + # observed contention levels will recur, so the cells will eventually be + # needed again; and for short-lived ones, it does not matter. + # + # @!visibility private + class Striped64 + + # Padded variant of AtomicLong supporting only raw accesses plus CAS. + # The +value+ field is placed between pads, hoping that the JVM doesn't + # reorder them. + # + # Optimisation note: It would be possible to use a release-only + # form of CAS here, if it were provided. + # + # @!visibility private + class Cell < Concurrent::AtomicReference + + alias_method :cas, :compare_and_set + + def cas_computed + cas(current_value = value, yield(current_value)) + end + + # @!visibility private + def self.padding + # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot + # TODO (pitr-ch 28-Jul-2018): the padding instance vars may not be created + # hide from yardoc in a method + attr_reader *(12.times.collect{ |i| "padding_#{i}".to_sym }) + end + padding + end + + extend Volatile + attr_volatile :cells, # Table of cells. When non-null, size is a power of 2. + :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. + :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. + + alias_method :busy?, :busy + + def initialize + super() + self.busy = false + self.base = 0 + end + + # Handles cases of updates involving initialization, resizing, + # creating new Cells, and/or contention. See above for + # explanation. This method suffers the usual non-modularity + # problems of optimistic retry code, relying on rechecked sets of + # reads. 
+ # + # Arguments: + # [+x+] + # the value + # [+hash_code+] + # hash code used + # [+x+] + # false if CAS failed before call + def retry_update(x, hash_code, was_uncontended) # :yields: current_value + hash = hash_code + collided = false # True if last slot nonempty + while true + if current_cells = cells + if !(cell = current_cells.volatile_get_by_hash(hash)) + if busy? + collided = false + else # Try to attach new Cell + if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell + break + else + redo # Slot is now non-empty + end + end + elsif !was_uncontended # CAS already known to fail + was_uncontended = true # Continue after rehash + elsif cell.cas_computed {|current_value| yield current_value} + break + elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale + collided = false + elsif collided && expand_table_unless_stale(current_cells) + collided = false + redo # Retry with expanded table + else + collided = true + end + hash = XorShiftRandom.xorshift(hash) + + elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} + break + end + end + self.hash_code = hash + end + + private + # Static per-thread hash code key. Shared across all instances to + # reduce Thread locals pollution and because adjustments due to + # collisions in one table are likely to be appropriate for + # others. + THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym + + # A thread-local hash code accessor. The code is initially + # random, but may be set to a different value upon collisions. + def hash_code + Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get + end + + def hash_code=(hash) + Thread.current[THREAD_LOCAL_KEY] = hash + end + + # Sets base and all +cells+ to the given value. 
+ def internal_reset(initial_value) + current_cells = cells + self.base = initial_value + if current_cells + current_cells.each do |cell| + cell.value = initial_value if cell + end + end + end + + def cas_base_computed + cas_base(current_base = base, yield(current_base)) + end + + def free? + !busy? + end + + def try_initialize_cells(x, hash) + if free? && !cells + try_in_busy do + unless cells # Recheck under lock + new_cells = PowerOfTwoTuple.new(2) + new_cells.volatile_set_by_hash(hash, Cell.new(x)) + self.cells = new_cells + end + end + end + end + + def expand_table_unless_stale(current_cells) + try_in_busy do + if current_cells == cells # Recheck under lock + new_cells = current_cells.next_in_size_table + current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} + self.cells = new_cells + end + end + end + + def try_to_install_new_cell(new_cell, hash) + try_in_busy do + # Recheck under lock + if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) + current_cells.volatile_set(i, new_cell) + end + end + end + + def try_in_busy + if cas_busy(false, true) + begin + yield + ensure + self.busy = false + end + end + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/volatile.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/volatile.rb new file mode 100644 index 0000000000..cdac2a396a --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/volatile.rb @@ -0,0 +1,75 @@ +require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # @!visibility private + module Volatile + + # Provides +volatile+ (in the JVM's sense) attribute accessors implemented + # atop of 
+Concurrent::AtomicReference+. + # + # Usage: + # class Foo + # extend Concurrent::ThreadSafe::Util::Volatile + # attr_volatile :foo, :bar + # + # def initialize(bar) + # super() # must super() into parent initializers before using the volatile attribute accessors + # self.bar = bar + # end + # + # def hello + # my_foo = foo # volatile read + # self.foo = 1 # volatile write + # cas_foo(1, 2) # => true | a strong CAS + # end + # end + def attr_volatile(*attr_names) + return if attr_names.empty? + include(Module.new do + atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new"} + initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| + "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" + end + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def initialize(*) + super + #{atomic_ref_setup.join('; ')} + end + + def initialize_copy(other) + super + #{initialize_copy_setup.join('; ')} + end + RUBY_EVAL + + attr_names.each do |attr_name| + class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 + def #{attr_name} + @__#{attr_name}.get + end + + def #{attr_name}=(value) + @__#{attr_name}.set(value) + end + + def compare_and_set_#{attr_name}(old_value, new_value) + @__#{attr_name}.compare_and_set(old_value, new_value) + end + RUBY_EVAL + + alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" + alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" + end + end) + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/xor_shift_random.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/xor_shift_random.rb new file mode 100644 index 0000000000..bdde2dd8b3 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/thread_safe/util/xor_shift_random.rb @@ -0,0 +1,50 @@ 
+require 'concurrent/thread_safe/util' + +module Concurrent + + # @!visibility private + module ThreadSafe + + # @!visibility private + module Util + + # A xorshift random number (positive +Fixnum+s) generator, provides + # reasonably cheap way to generate thread local random numbers without + # contending for the global +Kernel.rand+. + # + # Usage: + # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed + # while true + # if (x = XorShiftRandom.xorshift).odd? # thread-localy generate a next random number + # do_something_at_random + # end + # end + module XorShiftRandom + extend self + MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 + + # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. + def get + Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted + end + + # xorshift based on: http://www.jstatsoft.org/v08/i14/paper + if 0.size == 4 + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 3 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 14 + end + else + # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows + def xorshift(x) + x ^= x >> 1 + x ^= (x << 1) & MAX_INT # cut-off Bignum overflow + x ^= x >> 54 + end + end + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/timer_task.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/timer_task.rb new file mode 100644 index 0000000000..e766f2e6a3 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/timer_task.rb @@ -0,0 +1,334 @@ +require 'concurrent/collection/copy_on_notify_observer_set' +require 'concurrent/concern/dereferenceable' +require 'concurrent/concern/observable' +require 'concurrent/atomic/atomic_boolean' +require 
'concurrent/executor/executor_service' +require 'concurrent/executor/ruby_executor_service' +require 'concurrent/executor/safe_task_executor' +require 'concurrent/scheduled_task' + +module Concurrent + + # A very common concurrency pattern is to run a thread that performs a task at + # regular intervals. The thread that performs the task sleeps for the given + # interval then wakes up and performs the task. Lather, rinse, repeat... This + # pattern causes two problems. First, it is difficult to test the business + # logic of the task because the task itself is tightly coupled with the + # concurrency logic. Second, an exception raised while performing the task can + # cause the entire thread to abend. In a long-running application where the + # task thread is intended to run for days/weeks/years a crashed task thread + # can pose a significant problem. `TimerTask` alleviates both problems. + # + # When a `TimerTask` is launched it starts a thread for monitoring the + # execution interval. The `TimerTask` thread does not perform the task, + # however. Instead, the TimerTask launches the task on a separate thread. + # Should the task experience an unrecoverable crash only the task thread will + # crash. This makes the `TimerTask` very fault tolerant. Additionally, the + # `TimerTask` thread can respond to the success or failure of the task, + # performing logging or ancillary operations. `TimerTask` can also be + # configured with a timeout value allowing it to kill a task that runs too + # long. + # + # One other advantage of `TimerTask` is that it forces the business logic to + # be completely decoupled from the concurrency logic. The business logic can + # be tested separately then passed to the `TimerTask` for scheduling and + # running. + # + # In some cases it may be necessary for a `TimerTask` to affect its own + # execution cycle. 
To facilitate this, a reference to the TimerTask instance + # is passed as an argument to the provided block every time the task is + # executed. + # + # The `TimerTask` class includes the `Dereferenceable` mixin module so the + # result of the last execution is always available via the `#value` method. + # Dereferencing options can be passed to the `TimerTask` during construction or + # at any later time using the `#set_deref_options` method. + # + # `TimerTask` supports notification through the Ruby standard library + # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # Observable} module. On execution the `TimerTask` will notify the observers + # with three arguments: time of execution, the result of the block (or nil on + # failure), and any raised exceptions (or nil on success). If the timeout + # interval is exceeded the observer will receive a `Concurrent::TimeoutError` + # object as the third argument. + # + # @!macro copy_options + # + # @example Basic usage + # task = Concurrent::TimerTask.new{ puts 'Boom!' } + # task.execute + # + # task.execution_interval #=> 60 (default) + # task.timeout_interval #=> 30 (default) + # + # # wait 60 seconds... + # #=> 'Boom!' + # + # task.shutdown #=> true + # + # @example Configuring `:execution_interval` and `:timeout_interval` + # task = Concurrent::TimerTask.new(execution_interval: 5, timeout_interval: 5) do + # puts 'Boom!' + # end + # + # task.execution_interval #=> 5 + # task.timeout_interval #=> 5 + # + # @example Immediate execution with `:run_now` + # task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' } + # task.execute + # + # #=> 'Boom!' 
+ # + # @example Last `#value` and `Dereferenceable` mixin + # task = Concurrent::TimerTask.new( + # dup_on_deref: true, + # execution_interval: 5 + # ){ Time.now } + # + # task.execute + # Time.now #=> 2013-11-07 18:06:50 -0500 + # sleep(10) + # task.value #=> 2013-11-07 18:06:55 -0500 + # + # @example Controlling execution from within the block + # timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task| + # task.execution_interval.times{ print 'Boom! ' } + # print "\n" + # task.execution_interval += 1 + # if task.execution_interval > 5 + # puts 'Stopping...' + # task.shutdown + # end + # end + # + # timer_task.execute # blocking call - this task will stop itself + # #=> Boom! + # #=> Boom! Boom! + # #=> Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! + # #=> Boom! Boom! Boom! Boom! Boom! + # #=> Stopping... + # + # @example Observation + # class TaskObserver + # def update(time, result, ex) + # if result + # print "(#{time}) Execution successfully returned #{result}\n" + # elsif ex.is_a?(Concurrent::TimeoutError) + # print "(#{time}) Execution timed out\n" + # else + # print "(#{time}) Execution failed with error #{ex}\n" + # end + # end + # end + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ 42 } + # task.add_observer(TaskObserver.new) + # task.execute + # sleep 4 + # + # #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42 + # #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42 + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ sleep } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:07:25 -0400) Execution timed out + # #=> (2013-10-13 19:07:27 -0400) Execution timed out + # #=> (2013-10-13 19:07:29 -0400) Execution timed out + # task.shutdown + # + # task = Concurrent::TimerTask.new(execution_interval: 1){ raise 
StandardError } + # task.add_observer(TaskObserver.new) + # task.execute + # + # #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError + # #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError + # task.shutdown + # + # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html + # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html + class TimerTask < RubyExecutorService + include Concern::Dereferenceable + include Concern::Observable + + # Default `:execution_interval` in seconds. + EXECUTION_INTERVAL = 60 + + # Default `:timeout_interval` in seconds. + TIMEOUT_INTERVAL = 30 + + # Create a new TimerTask with the given task and configuration. + # + # @!macro timer_task_initialize + # @param [Hash] opts the options defining task execution. + # @option opts [Integer] :execution_interval number of seconds between + # task executions (default: EXECUTION_INTERVAL) + # @option opts [Integer] :timeout_interval number of seconds a task can + # run before it is considered to have failed (default: TIMEOUT_INTERVAL) + # @option opts [Boolean] :run_now Whether to run the task immediately + # upon instantiation or to wait until the first # execution_interval + # has passed (default: false) + # + # @!macro deref_options + # + # @raise ArgumentError when no block is given. + # + # @yield to the block after :execution_interval seconds have passed since + # the last yield + # @yieldparam task a reference to the `TimerTask` instance so that the + # block can control its own lifecycle. Necessary since `self` will + # refer to the execution context of the block rather than the running + # `TimerTask`. + # + # @return [TimerTask] the new `TimerTask` + def initialize(opts = {}, &task) + raise ArgumentError.new('no block given') unless block_given? + super + set_deref_options opts + end + + # Is the executor running? 
+ # + # @return [Boolean] `true` when running, `false` when shutting down or shutdown + def running? + @running.true? + end + + # Execute a previously created `TimerTask`. + # + # @return [TimerTask] a reference to `self` + # + # @example Instance and execute in separate steps + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" } + # task.running? #=> false + # task.execute + # task.running? #=> true + # + # @example Instance and execute in one line + # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }.execute + # task.running? #=> true + def execute + synchronize do + if @running.false? + @running.make_true + schedule_next_task(@run_now ? 0 : @execution_interval) + end + end + self + end + + # Create and execute a new `TimerTask`. + # + # @!macro timer_task_initialize + # + # @example + # task = Concurrent::TimerTask.execute(execution_interval: 10){ print "Hello World\n" } + # task.running? #=> true + def self.execute(opts = {}, &task) + TimerTask.new(opts, &task).execute + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval + synchronize { @execution_interval } + end + + # @!attribute [rw] execution_interval + # @return [Fixnum] Number of seconds after the task completes before the + # task is performed again. + def execution_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @execution_interval = value } + end + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. + def timeout_interval + synchronize { @timeout_interval } + end + + # @!attribute [rw] timeout_interval + # @return [Fixnum] Number of seconds the task can run before it is + # considered to have failed. 
+ def timeout_interval=(value) + if (value = value.to_f) <= 0.0 + raise ArgumentError.new('must be greater than zero') + else + synchronize { @timeout_interval = value } + end + end + + private :post, :<< + + private + + def ns_initialize(opts, &task) + set_deref_options(opts) + + self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL + self.timeout_interval = opts[:timeout] || opts[:timeout_interval] || TIMEOUT_INTERVAL + @run_now = opts[:now] || opts[:run_now] + @executor = Concurrent::SafeTaskExecutor.new(task) + @running = Concurrent::AtomicBoolean.new(false) + @value = nil + + self.observers = Collection::CopyOnNotifyObserverSet.new + end + + # @!visibility private + def ns_shutdown_execution + @running.make_false + super + end + + # @!visibility private + def ns_kill_execution + @running.make_false + super + end + + # @!visibility private + def schedule_next_task(interval = execution_interval) + ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task)) + nil + end + + # @!visibility private + def execute_task(completion) + return nil unless @running.true? + ScheduledTask.execute(timeout_interval, args: [completion], &method(:timeout_task)) + _success, value, reason = @executor.execute(self) + if completion.try? + self.value = value + schedule_next_task + time = Time.now + observers.notify_observers do + [time, self.value, reason] + end + end + nil + end + + # @!visibility private + def timeout_task(completion) + return unless @running.true? + if completion.try? 
+ self.value = value + schedule_next_task + observers.notify_observers(Time.now, nil, Concurrent::TimeoutError.new) + end + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tuple.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tuple.rb new file mode 100644 index 0000000000..f8c4c25d32 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tuple.rb @@ -0,0 +1,86 @@ +require 'concurrent/atomic/atomic_reference' + +module Concurrent + + # A fixed size array with volatile (synchronized, thread safe) getters/setters. + # Mixes in Ruby's `Enumerable` module for enhanced search, sort, and traversal. + # + # @example + # tuple = Concurrent::Tuple.new(16) + # + # tuple.set(0, :foo) #=> :foo | volatile write + # tuple.get(0) #=> :foo | volatile read + # tuple.compare_and_set(0, :foo, :bar) #=> true | strong CAS + # tuple.cas(0, :foo, :baz) #=> false | strong CAS + # tuple.get(0) #=> :bar | volatile read + # + # @see https://en.wikipedia.org/wiki/Tuple Tuple entry at Wikipedia + # @see http://www.erlang.org/doc/reference_manual/data_types.html#id70396 Erlang Tuple + # @see http://ruby-doc.org/core-2.2.2/Enumerable.html Enumerable + class Tuple + include Enumerable + + # The (fixed) size of the tuple. + attr_reader :size + + # @!visibility private + Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : ::Array + private_constant :Tuple + + # Create a new tuple of the given size. + # + # @param [Integer] size the number of elements in the tuple + def initialize(size) + @size = size + @tuple = tuple = Tuple.new(size) + i = 0 + while i < size + tuple[i] = Concurrent::AtomicReference.new + i += 1 + end + end + + # Get the value of the element at the given index. 
+ # + # @param [Integer] i the index from which to retrieve the value + # @return [Object] the value at the given index or nil if the index is out of bounds + def get(i) + return nil if i >= @size || i < 0 + @tuple[i].get + end + alias_method :volatile_get, :get + + # Set the element at the given index to the given value + # + # @param [Integer] i the index for the element to set + # @param [Object] value the value to set at the given index + # + # @return [Object] the new value of the element at the given index or nil if the index is out of bounds + def set(i, value) + return nil if i >= @size || i < 0 + @tuple[i].set(value) + end + alias_method :volatile_set, :set + + # Set the value at the given index to the new value if and only if the current + # value matches the given old value. + # + # @param [Integer] i the index for the element to set + # @param [Object] old_value the value to compare against the current value + # @param [Object] new_value the value to set at the given index + # + # @return [Boolean] true if the value at the given element was set else false + def compare_and_set(i, old_value, new_value) + return false if i >= @size || i < 0 + @tuple[i].compare_and_set(old_value, new_value) + end + alias_method :cas, :compare_and_set + + # Calls the given block once for each element in self, passing that element as a parameter. 
+ # + # @yieldparam [Object] ref the `Concurrent::AtomicReference` object at the current index + def each + @tuple.each {|ref| yield ref.get} + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tvar.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tvar.rb new file mode 100644 index 0000000000..09138c8833 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/tvar.rb @@ -0,0 +1,258 @@ +require 'set' +require 'concurrent/synchronization' + +module Concurrent + + # A `TVar` is a transactional variable - a single-element container that + # is used as part of a transaction - see `Concurrent::atomically`. + # + # @!macro thread_safe_variable_comparison + # + # {include:file:docs-source/tvar.md} + class TVar < Synchronization::Object + safe_initialization! + + # Create a new `TVar` with an initial value. + def initialize(value) + @value = value + @version = 0 + @lock = Mutex.new + end + + # Get the value of a `TVar`. + def value + Concurrent::atomically do + Transaction::current.read(self) + end + end + + # Set the value of a `TVar`. + def value=(value) + Concurrent::atomically do + Transaction::current.write(self, value) + end + end + + # @!visibility private + def unsafe_value # :nodoc: + @value + end + + # @!visibility private + def unsafe_value=(value) # :nodoc: + @value = value + end + + # @!visibility private + def unsafe_version # :nodoc: + @version + end + + # @!visibility private + def unsafe_increment_version # :nodoc: + @version += 1 + end + + # @!visibility private + def unsafe_lock # :nodoc: + @lock + end + + end + + # Run a block that reads and writes `TVar`s as a single atomic transaction. 
+ # With respect to the value of `TVar` objects, the transaction is atomic, in + # that it either happens or it does not, consistent, in that the `TVar` + # objects involved will never enter an illegal state, and isolated, in that + # transactions never interfere with each other. You may recognise these + # properties from database transactions. + # + # There are some very important and unusual semantics that you must be aware of: + # + # * Most importantly, the block that you pass to atomically may be executed + # more than once. In most cases your code should be free of + # side-effects, except for via TVar. + # + # * If an exception escapes an atomically block it will abort the transaction. + # + # * It is undefined behaviour to use callcc or Fiber with atomically. + # + # * If you create a new thread within an atomically, it will not be part of + # the transaction. Creating a thread counts as a side-effect. + # + # Transactions within transactions are flattened to a single transaction. + # + # @example + # a = new TVar(100_000) + # b = new TVar(100) + # + # Concurrent::atomically do + # a.value -= 10 + # b.value += 10 + # end + def atomically + raise ArgumentError.new('no block given') unless block_given? + + # Get the current transaction + + transaction = Transaction::current + + # Are we not already in a transaction (not nested)? + + if transaction.nil? 
+ # New transaction + + begin + # Retry loop + + loop do + + # Create a new transaction + + transaction = Transaction.new + Transaction::current = transaction + + # Run the block, aborting on exceptions + + begin + result = yield + rescue Transaction::AbortError => e + transaction.abort + result = Transaction::ABORTED + rescue Transaction::LeaveError => e + transaction.abort + break result + rescue => e + transaction.abort + raise e + end + # If we can commit, break out of the loop + + if result != Transaction::ABORTED + if transaction.commit + break result + end + end + end + ensure + # Clear the current transaction + + Transaction::current = nil + end + else + # Nested transaction - flatten it and just run the block + + yield + end + end + + # Abort a currently running transaction - see `Concurrent::atomically`. + def abort_transaction + raise Transaction::AbortError.new + end + + # Leave a transaction without committing or aborting - see `Concurrent::atomically`. + def leave_transaction + raise Transaction::LeaveError.new + end + + module_function :atomically, :abort_transaction, :leave_transaction + + private + + class Transaction + + ABORTED = ::Object.new + + ReadLogEntry = Struct.new(:tvar, :version) + + AbortError = Class.new(StandardError) + LeaveError = Class.new(StandardError) + + def initialize + @read_log = [] + @write_log = {} + end + + def read(tvar) + Concurrent::abort_transaction unless valid? + + if @write_log.has_key? tvar + @write_log[tvar] + else + @read_log.push(ReadLogEntry.new(tvar, tvar.unsafe_version)) + tvar.unsafe_value + end + end + + def write(tvar, value) + # Have we already written to this TVar? + + unless @write_log.has_key? 
tvar + # Try to lock the TVar + + unless tvar.unsafe_lock.try_lock + # Someone else is writing to this TVar - abort + Concurrent::abort_transaction + end + + # If we previously wrote to it, check the version hasn't changed + + @read_log.each do |log_entry| + if log_entry.tvar == tvar and tvar.unsafe_version > log_entry.version + Concurrent::abort_transaction + end + end + end + + # Record the value written + + @write_log[tvar] = value + end + + def abort + unlock + end + + def commit + return false unless valid? + + @write_log.each_pair do |tvar, value| + tvar.unsafe_value = value + tvar.unsafe_increment_version + end + + unlock + + true + end + + def valid? + @read_log.each do |log_entry| + unless @write_log.has_key? log_entry.tvar + if log_entry.tvar.unsafe_version > log_entry.version + return false + end + end + end + + true + end + + def unlock + @write_log.each_key do |tvar| + tvar.unsafe_lock.unlock + end + end + + def self.current + Thread.current[:current_tvar_transaction] + end + + def self.current=(transaction) + Thread.current[:current_tvar_transaction] = transaction + end + + end + +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/at_exit.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/at_exit.rb new file mode 100644 index 0000000000..0e52ca379b --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/at_exit.rb @@ -0,0 +1,97 @@ +require 'logger' +require 'concurrent/synchronization' + +module Concurrent + + # Provides ability to add and remove handlers to be run at `Kernel#at_exit`, order is undefined. + # Each handler is executed at most once. 
+ # + # @!visibility private + class AtExitImplementation < Synchronization::LockableObject + include Logger::Severity + + def initialize(*args) + super() + synchronize { ns_initialize(*args) } + end + + # Add a handler to be run at `Kernel#at_exit` + # @param [Object] handler_id optionally provide an id, if already present, handler is replaced + # @yield the handler + # @return id of the handler + def add(handler_id = nil, &handler) + id = handler_id || handler.object_id + synchronize { @handlers[id] = handler } + id + end + + # Delete a handler by handler_id + # @return [true, false] + def delete(handler_id) + !!synchronize { @handlers.delete handler_id } + end + + # Is handler with handler_id rpesent? + # @return [true, false] + def handler?(handler_id) + synchronize { @handlers.key? handler_id } + end + + # @return copy of the handlers + def handlers + synchronize { @handlers }.clone + end + + # install `Kernel#at_exit` callback to execute added handlers + def install + synchronize do + @installed ||= begin + at_exit { runner } + true + end + self + end + end + + # Will it run during `Kernel#at_exit` + def enabled? 
+ synchronize { @enabled } + end + + # Configure if it runs during `Kernel#at_exit` + def enabled=(value) + synchronize { @enabled = value } + end + + # run the handlers manually + # @return ids of the handlers + def run + handlers, _ = synchronize { handlers, @handlers = @handlers, {} } + handlers.each do |_, handler| + begin + handler.call + rescue => error + Concurrent.global_logger.call(ERROR, error) + end + end + handlers.keys + end + + private + + def ns_initialize(enabled = true) + @handlers = {} + @enabled = enabled + end + + def runner + run if synchronize { @enabled } + end + end + + private_constant :AtExitImplementation + + # @see AtExitImplementation + # @!visibility private + AtExit = AtExitImplementation.new.install +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/utility/engine.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/engine.rb similarity index 84% rename from Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/utility/engine.rb rename to Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/engine.rb index a83f9fa087..bc4173e448 100644 --- a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.0.5/lib/concurrent/utility/engine.rb +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/engine.rb @@ -8,7 +8,7 @@ module Concurrent end def on_jruby_9000? - on_jruby? && ruby_version(:>=, 9, 0, 0, JRUBY_VERSION) + on_jruby? && ruby_version(JRUBY_VERSION, :>=, 9, 0, 0) end def on_cruby? @@ -19,8 +19,8 @@ module Concurrent ruby_engine == 'rbx' end - def on_truffle? - ruby_engine == 'jruby+truffle' + def on_truffleruby? + ruby_engine == 'truffleruby' end def on_windows? @@ -39,7 +39,7 @@ module Concurrent defined?(RUBY_ENGINE) ? 
RUBY_ENGINE : 'ruby' end - def ruby_version(comparison, major, minor, patch, version = RUBY_VERSION) + def ruby_version(version = RUBY_VERSION, comparison, major, minor, patch) result = (version.split('.').map(&:to_i) <=> [major, minor, patch]) comparisons = { :== => [0], :>= => [1, 0], diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/monotonic_time.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/monotonic_time.rb new file mode 100644 index 0000000000..c9f4b369a4 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/monotonic_time.rb @@ -0,0 +1,58 @@ +require 'concurrent/synchronization' + +module Concurrent + + class_definition = Class.new(Synchronization::LockableObject) do + def initialize + @last_time = Time.now.to_f + super() + end + + if defined?(Process::CLOCK_MONOTONIC) + # @!visibility private + def get_time + Process.clock_gettime(Process::CLOCK_MONOTONIC) + end + elsif Concurrent.on_jruby? + # @!visibility private + def get_time + java.lang.System.nanoTime() / 1_000_000_000.0 + end + else + + # @!visibility private + def get_time + synchronize do + now = Time.now.to_f + if @last_time < now + @last_time = now + else # clock has moved back in time + @last_time += 0.000_001 + end + end + end + + end + end + + # Clock that cannot be set and represents monotonic time since + # some unspecified starting point. + # + # @!visibility private + GLOBAL_MONOTONIC_CLOCK = class_definition.new + private_constant :GLOBAL_MONOTONIC_CLOCK + + # @!macro monotonic_get_time + # + # Returns the current time a tracked by the application monotonic clock. 
+ # + # @return [Float] The current monotonic time since some unspecified + # starting point + # + # @!macro monotonic_clock_warning + def monotonic_time + GLOBAL_MONOTONIC_CLOCK.get_time + end + + module_function :monotonic_time +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_extension_loader.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_extension_loader.rb new file mode 100644 index 0000000000..a944bd7290 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_extension_loader.rb @@ -0,0 +1,79 @@ +require 'concurrent/utility/engine' + +module Concurrent + + module Utility + + # @!visibility private + module NativeExtensionLoader + + def allow_c_extensions? + Concurrent.on_cruby? + end + + def c_extensions_loaded? + defined?(@c_extensions_loaded) && @c_extensions_loaded + end + + def java_extensions_loaded? + defined?(@java_extensions_loaded) && @java_extensions_loaded + end + + def load_native_extensions + unless defined? Synchronization::AbstractObject + raise 'native_extension_loader loaded before Synchronization::AbstractObject' + end + + if Concurrent.on_cruby? && !c_extensions_loaded? + ['concurrent/concurrent_ruby_ext', + "concurrent/#{RUBY_VERSION[0..2]}/concurrent_ruby_ext" + ].each { |p| try_load_c_extension p } + end + + if Concurrent.on_jruby? && !java_extensions_loaded? + begin + require 'concurrent/concurrent_ruby.jar' + set_java_extensions_loaded + rescue LoadError => e + raise e, "Java extensions are required for JRuby.\n" + e.message, e.backtrace + end + end + end + + private + + def load_error_path(error) + if error.respond_to? 
:path + error.path + else + error.message.split(' -- ').last + end + end + + def set_c_extensions_loaded + @c_extensions_loaded = true + end + + def set_java_extensions_loaded + @java_extensions_loaded = true + end + + def try_load_c_extension(path) + require path + set_c_extensions_loaded + rescue LoadError => e + if load_error_path(e) == path + # move on with pure-Ruby implementations + # TODO (pitr-ch 12-Jul-2018): warning on verbose? + else + raise e + end + end + + end + end + + # @!visibility private + extend Utility::NativeExtensionLoader +end + diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_integer.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_integer.rb new file mode 100644 index 0000000000..10719e7caa --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/native_integer.rb @@ -0,0 +1,53 @@ +module Concurrent + module Utility + # @private + module NativeInteger + # http://stackoverflow.com/questions/535721/ruby-max-integer + MIN_VALUE = -(2**(0.size * 8 - 2)) + MAX_VALUE = (2**(0.size * 8 - 2) - 1) + + def ensure_upper_bound(value) + if value > MAX_VALUE + raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}") + end + value + end + + def ensure_lower_bound(value) + if value < MIN_VALUE + raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}") + end + value + end + + def ensure_integer(value) + unless value.is_a?(Integer) + raise ArgumentError.new("#{value} is not an Integer") + end + value + end + + def ensure_integer_and_bounds(value) + ensure_integer value + ensure_upper_bound value + ensure_lower_bound value + end + + def ensure_positive(value) + if value < 0 + raise ArgumentError.new("#{value} cannot be negative") + end + value + end + + def ensure_positive_and_no_zero(value) + if value < 1 + 
raise ArgumentError.new("#{value} cannot be negative or zero") + end + value + end + + extend self + end + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/processor_counter.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/processor_counter.rb new file mode 100644 index 0000000000..6d6ae8dea4 --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/utility/processor_counter.rb @@ -0,0 +1,158 @@ +require 'rbconfig' +require 'concurrent/delay' + +module Concurrent + module Utility + + # @!visibility private + class ProcessorCounter + def initialize + @processor_count = Delay.new { compute_processor_count } + @physical_processor_count = Delay.new { compute_physical_processor_count } + end + + # Number of processors seen by the OS and used for process scheduling. For + # performance reasons the calculated value will be memoized on the first + # call. + # + # When running under JRuby the Java runtime call + # `java.lang.Runtime.getRuntime.availableProcessors` will be used. According + # to the Java documentation this "value may change during a particular + # invocation of the virtual machine... [applications] should therefore + # occasionally poll this property." Subsequently the result will NOT be + # memoized under JRuby. + # + # On Windows the Win32 API will be queried for the + # `NumberOfLogicalProcessors from Win32_Processor`. This will return the + # total number "logical processors for the current instance of the + # processor", which taked into account hyperthreading. 
+ # + # * AIX: /usr/sbin/pmcycles (AIX 5+), /usr/sbin/lsdev + # * Alpha: /usr/bin/nproc (/proc/cpuinfo exists but cannot be used) + # * BSD: /sbin/sysctl + # * Cygwin: /proc/cpuinfo + # * Darwin: /usr/bin/hwprefs, /usr/sbin/sysctl + # * HP-UX: /usr/sbin/ioscan + # * IRIX: /usr/sbin/sysconf + # * Linux: /proc/cpuinfo + # * Minix 3+: /proc/cpuinfo + # * Solaris: /usr/sbin/psrinfo + # * Tru64 UNIX: /usr/sbin/psrinfo + # * UnixWare: /usr/sbin/psrinfo + # + # @return [Integer] number of processors seen by the OS or Java runtime + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors() + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + def processor_count + @processor_count.value + end + + # Number of physical processor cores on the current system. For performance + # reasons the calculated value will be memoized on the first call. + # + # On Windows the Win32 API will be queried for the `NumberOfCores from + # Win32_Processor`. This will return the total number "of cores for the + # current instance of the processor." On Unix-like operating systems either + # the `hwprefs` or `sysctl` utility will be called in a subshell and the + # returned value will be used. In the rare case where none of these methods + # work or an exception is raised the function will simply return 1. + # + # @return [Integer] number physical processor cores on the current system + # + # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb + # + # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx + # @see http://www.unix.com/man-page/osx/1/HWPREFS/ + # @see http://linux.die.net/man/8/sysctl + def physical_processor_count + @physical_processor_count.value + end + + private + + def compute_processor_count + if Concurrent.on_jruby? 
+ java.lang.Runtime.getRuntime.availableProcessors + else + os_name = RbConfig::CONFIG["target_os"] + if os_name =~ /mingw|mswin/ + require 'win32ole' + result = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfLogicalProcessors from Win32_Processor") + result.to_enum.collect(&:NumberOfLogicalProcessors).reduce(:+) + elsif File.readable?("/proc/cpuinfo") && (cpuinfo_count = IO.read("/proc/cpuinfo").scan(/^processor/).size) > 0 + cpuinfo_count + elsif File.executable?("/usr/bin/nproc") + IO.popen("/usr/bin/nproc --all", &:read).to_i + elsif File.executable?("/usr/bin/hwprefs") + IO.popen("/usr/bin/hwprefs thread_count", &:read).to_i + elsif File.executable?("/usr/sbin/psrinfo") + IO.popen("/usr/sbin/psrinfo", &:read).scan(/^.*on-*line/).size + elsif File.executable?("/usr/sbin/ioscan") + IO.popen("/usr/sbin/ioscan -kC processor", &:read).scan(/^.*processor/).size + elsif File.executable?("/usr/sbin/pmcycles") + IO.popen("/usr/sbin/pmcycles -m", &:read).count("\n") + elsif File.executable?("/usr/sbin/lsdev") + IO.popen("/usr/sbin/lsdev -Cc processor -S 1", &:read).count("\n") + elsif File.executable?("/usr/sbin/sysconf") and os_name =~ /irix/i + IO.popen("/usr/sbin/sysconf NPROC_ONLN", &:read).to_i + elsif File.executable?("/usr/sbin/sysctl") + IO.popen("/usr/sbin/sysctl -n hw.ncpu", &:read).to_i + elsif File.executable?("/sbin/sysctl") + IO.popen("/sbin/sysctl -n hw.ncpu", &:read).to_i + else + # TODO (pitr-ch 05-Nov-2016): warn about failures + 1 + end + end + rescue + return 1 + end + + def compute_physical_processor_count + ppc = case RbConfig::CONFIG["target_os"] + when /darwin1/ + IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i + when /linux/ + cores = {} # unique physical ID / core ID combinations + phy = 0 + IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln| + if ln.start_with?("physical") + phy = ln[/\d+/] + elsif ln.start_with?("core") + cid = phy + ":" + ln[/\d+/] + cores[cid] = true if not cores[cid] + end + end + 
cores.count + when /mswin|mingw/ + require 'win32ole' + result_set = WIN32OLE.connect("winmgmts://").ExecQuery( + "select NumberOfCores from Win32_Processor") + result_set.to_enum.collect(&:NumberOfCores).reduce(:+) + else + processor_count + end + # fall back to logical count if physical info is invalid + ppc > 0 ? ppc : processor_count + rescue + return 1 + end + end + end + + # create the default ProcessorCounter on load + @processor_counter = Utility::ProcessorCounter.new + singleton_class.send :attr_reader, :processor_counter + + def self.processor_count + processor_counter.processor_count + end + + def self.physical_processor_count + processor_counter.physical_processor_count + end +end diff --git a/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/version.rb b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/version.rb new file mode 100644 index 0000000000..6f4e68da4c --- /dev/null +++ b/Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/concurrent-ruby-1.1.3/lib/concurrent/version.rb @@ -0,0 +1,4 @@ +module Concurrent + VERSION = '1.1.3' + EDGE_VERSION = '0.4.1' +end