Cleanup other vendored gems.
Exclude subdirectories and specific files. This makes adding/removing files on newly vendored versions much easier.
This commit is contained in:
parent
1903a8bfb0
commit
6119c78c5c
62
.gitignore
vendored
62
.gitignore
vendored
@ -33,16 +33,66 @@
|
||||
!**/vendor/bundle-standalone/ruby/*/gems/*/lib
|
||||
|
||||
# Ignore partially included gems where we don't need all files
|
||||
**/vendor/bundle-standalone/ruby/*/gems/activesupport-*/lib
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib
|
||||
**/vendor/bundle-standalone/ruby/*/gems/minitest-*/lib
|
||||
**/vendor/bundle-standalone/ruby/*/gems/thread_safe-*/lib
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support.rb
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/all.rb
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/cache.rb
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/cache/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/concurrency/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/dependencies.rb
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/dependencies/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/duration/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/json.rb
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/json/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/log_subscriber.rb
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/log_subscriber/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/messages/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/multibyte/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/number_helper.rb
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/number_helper/
|
||||
**/vendor/bundle-standalone/ruby/2.3.0/gems/activesupport-*/lib/active_support/testing/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/latest.rb
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/rails.rb
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/version.rb
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/1.*
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/2.0*
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/2.1*
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/2.2*
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/2.3*
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/2.*.rb
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/force/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/rails/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/random/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/array/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/comparable/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/dir/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/enumerable/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/false_class/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/fixnum/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/float/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/hash/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/integer/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/kernel/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/module/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/nil_class/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/regexp/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/struct/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/struct/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/backports-*/lib/backports/*/true_class/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/atomic/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/atomic_reference/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/collection/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/concern/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/executor/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/synchronization/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/thread_safe/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/concurrent-ruby-*/lib/utility/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/i18n-*/lib/i18n/tests*
|
||||
**/vendor/bundle-standalone/ruby/*/gems/thread_safe-*/lib/thread_safe/util
|
||||
|
||||
# Ignore rubocop dependencies we don't wish to vendor
|
||||
# Ignore rubocop's (and other) dependencies we don't wish to vendor
|
||||
**/vendor/bundle-standalone/ruby/*/gems/ast-*/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/jaro_winkler-*/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/minitest-*/lib
|
||||
**/vendor/bundle-standalone/ruby/*/gems/parallel-*/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/parser-*/
|
||||
**/vendor/bundle-standalone/ruby/*/gems/powerpack-*/
|
||||
|
@ -0,0 +1,48 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
|
||||
# Wrapping an array in an +ArrayInquirer+ gives a friendlier way to check
|
||||
# its string-like contents:
|
||||
#
|
||||
# variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet])
|
||||
#
|
||||
# variants.phone? # => true
|
||||
# variants.tablet? # => true
|
||||
# variants.desktop? # => false
|
||||
class ArrayInquirer < Array
|
||||
# Passes each element of +candidates+ collection to ArrayInquirer collection.
|
||||
# The method returns true if any element from the ArrayInquirer collection
|
||||
# is equal to the stringified or symbolized form of any element in the +candidates+ collection.
|
||||
#
|
||||
# If +candidates+ collection is not given, method returns true.
|
||||
#
|
||||
# variants = ActiveSupport::ArrayInquirer.new([:phone, :tablet])
|
||||
#
|
||||
# variants.any? # => true
|
||||
# variants.any?(:phone, :tablet) # => true
|
||||
# variants.any?('phone', 'desktop') # => true
|
||||
# variants.any?(:desktop, :watch) # => false
|
||||
def any?(*candidates)
|
||||
if candidates.none?
|
||||
super
|
||||
else
|
||||
candidates.any? do |candidate|
|
||||
include?(candidate.to_sym) || include?(candidate.to_s)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
def respond_to_missing?(name, include_private = false)
|
||||
(name[-1] == "?") || super
|
||||
end
|
||||
|
||||
def method_missing(name, *args)
|
||||
if name[-1] == "?"
|
||||
any?(name[0..-2])
|
||||
else
|
||||
super
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,105 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
|
||||
# Backtraces often include many lines that are not relevant for the context
|
||||
# under review. This makes it hard to find the signal amongst the backtrace
|
||||
# noise, and adds debugging time. With a BacktraceCleaner, filters and
|
||||
# silencers are used to remove the noisy lines, so that only the most relevant
|
||||
# lines remain.
|
||||
#
|
||||
# Filters are used to modify lines of data, while silencers are used to remove
|
||||
# lines entirely. The typical filter use case is to remove lengthy path
|
||||
# information from the start of each line, and view file paths relevant to the
|
||||
# app directory instead of the file system root. The typical silencer use case
|
||||
# is to exclude the output of a noisy library from the backtrace, so that you
|
||||
# can focus on the rest.
|
||||
#
|
||||
# bc = ActiveSupport::BacktraceCleaner.new
|
||||
# bc.add_filter { |line| line.gsub(Rails.root.to_s, '') } # strip the Rails.root prefix
|
||||
# bc.add_silencer { |line| line =~ /puma|rubygems/ } # skip any lines from puma or rubygems
|
||||
# bc.clean(exception.backtrace) # perform the cleanup
|
||||
#
|
||||
# To reconfigure an existing BacktraceCleaner (like the default one in Rails)
|
||||
# and show as much data as possible, you can always call
|
||||
# <tt>BacktraceCleaner#remove_silencers!</tt>, which will restore the
|
||||
# backtrace to a pristine state. If you need to reconfigure an existing
|
||||
# BacktraceCleaner so that it does not filter or modify the paths of any lines
|
||||
# of the backtrace, you can call <tt>BacktraceCleaner#remove_filters!</tt>
|
||||
# These two methods will give you a completely untouched backtrace.
|
||||
#
|
||||
# Inspired by the Quiet Backtrace gem by thoughtbot.
|
||||
class BacktraceCleaner
|
||||
def initialize
|
||||
@filters, @silencers = [], []
|
||||
end
|
||||
|
||||
# Returns the backtrace after all filters and silencers have been run
|
||||
# against it. Filters run first, then silencers.
|
||||
def clean(backtrace, kind = :silent)
|
||||
filtered = filter_backtrace(backtrace)
|
||||
|
||||
case kind
|
||||
when :silent
|
||||
silence(filtered)
|
||||
when :noise
|
||||
noise(filtered)
|
||||
else
|
||||
filtered
|
||||
end
|
||||
end
|
||||
alias :filter :clean
|
||||
|
||||
# Adds a filter from the block provided. Each line in the backtrace will be
|
||||
# mapped against this filter.
|
||||
#
|
||||
# # Will turn "/my/rails/root/app/models/person.rb" into "/app/models/person.rb"
|
||||
# backtrace_cleaner.add_filter { |line| line.gsub(Rails.root, '') }
|
||||
def add_filter(&block)
|
||||
@filters << block
|
||||
end
|
||||
|
||||
# Adds a silencer from the block provided. If the silencer returns +true+
|
||||
# for a given line, it will be excluded from the clean backtrace.
|
||||
#
|
||||
# # Will reject all lines that include the word "puma", like "/gems/puma/server.rb" or "/app/my_puma_server/rb"
|
||||
# backtrace_cleaner.add_silencer { |line| line =~ /puma/ }
|
||||
def add_silencer(&block)
|
||||
@silencers << block
|
||||
end
|
||||
|
||||
# Removes all silencers, but leaves in the filters. Useful if your
|
||||
# context of debugging suddenly expands as you suspect a bug in one of
|
||||
# the libraries you use.
|
||||
def remove_silencers!
|
||||
@silencers = []
|
||||
end
|
||||
|
||||
# Removes all filters, but leaves in the silencers. Useful if you suddenly
|
||||
# need to see entire filepaths in the backtrace that you had already
|
||||
# filtered out.
|
||||
def remove_filters!
|
||||
@filters = []
|
||||
end
|
||||
|
||||
private
|
||||
def filter_backtrace(backtrace)
|
||||
@filters.each do |f|
|
||||
backtrace = backtrace.map { |line| f.call(line) }
|
||||
end
|
||||
|
||||
backtrace
|
||||
end
|
||||
|
||||
def silence(backtrace)
|
||||
@silencers.each do |s|
|
||||
backtrace = backtrace.reject { |line| s.call(line) }
|
||||
end
|
||||
|
||||
backtrace
|
||||
end
|
||||
|
||||
def noise(backtrace)
|
||||
backtrace - silence(backtrace)
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,51 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/core_ext/benchmark"
|
||||
require "active_support/core_ext/hash/keys"
|
||||
|
||||
module ActiveSupport
|
||||
module Benchmarkable
|
||||
# Allows you to measure the execution time of a block in a template and
|
||||
# records the result to the log. Wrap this block around expensive operations
|
||||
# or possible bottlenecks to get a time reading for the operation. For
|
||||
# example, let's say you thought your file processing method was taking too
|
||||
# long; you could wrap it in a benchmark block.
|
||||
#
|
||||
# <% benchmark 'Process data files' do %>
|
||||
# <%= expensive_files_operation %>
|
||||
# <% end %>
|
||||
#
|
||||
# That would add something like "Process data files (345.2ms)" to the log,
|
||||
# which you can then use to compare timings when optimizing your code.
|
||||
#
|
||||
# You may give an optional logger level (<tt>:debug</tt>, <tt>:info</tt>,
|
||||
# <tt>:warn</tt>, <tt>:error</tt>) as the <tt>:level</tt> option. The
|
||||
# default logger level value is <tt>:info</tt>.
|
||||
#
|
||||
# <% benchmark 'Low-level files', level: :debug do %>
|
||||
# <%= lowlevel_files_operation %>
|
||||
# <% end %>
|
||||
#
|
||||
# Finally, you can pass true as the third argument to silence all log
|
||||
# activity (other than the timing information) from inside the block. This
|
||||
# is great for boiling down a noisy block to just a single statement that
|
||||
# produces one log line:
|
||||
#
|
||||
# <% benchmark 'Process data files', level: :info, silence: true do %>
|
||||
# <%= expensive_and_chatty_files_operation %>
|
||||
# <% end %>
|
||||
def benchmark(message = "Benchmarking", options = {})
|
||||
if logger
|
||||
options.assert_valid_keys(:level, :silence)
|
||||
options[:level] ||= :info
|
||||
|
||||
result = nil
|
||||
ms = Benchmark.ms { result = options[:silence] ? logger.silence { yield } : yield }
|
||||
logger.send(options[:level], "%s (%.1fms)" % [ message, ms ])
|
||||
result
|
||||
else
|
||||
yield
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,8 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
begin
|
||||
require "builder"
|
||||
rescue LoadError => e
|
||||
$stderr.puts "You don't have builder installed in your application. Please add it to your Gemfile and run bundle install"
|
||||
raise e
|
||||
end
|
@ -0,0 +1,845 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/concern"
|
||||
require "active_support/descendants_tracker"
|
||||
require "active_support/core_ext/array/extract_options"
|
||||
require "active_support/core_ext/class/attribute"
|
||||
require "active_support/core_ext/kernel/reporting"
|
||||
require "active_support/core_ext/kernel/singleton_class"
|
||||
require "active_support/core_ext/string/filters"
|
||||
require "active_support/deprecation"
|
||||
require "thread"
|
||||
|
||||
module ActiveSupport
|
||||
# Callbacks are code hooks that are run at key points in an object's life cycle.
|
||||
# The typical use case is to have a base class define a set of callbacks
|
||||
# relevant to the other functionality it supplies, so that subclasses can
|
||||
# install callbacks that enhance or modify the base functionality without
|
||||
# needing to override or redefine methods of the base class.
|
||||
#
|
||||
# Mixing in this module allows you to define the events in the object's
|
||||
# life cycle that will support callbacks (via +ClassMethods.define_callbacks+),
|
||||
# set the instance methods, procs, or callback objects to be called (via
|
||||
# +ClassMethods.set_callback+), and run the installed callbacks at the
|
||||
# appropriate times (via +run_callbacks+).
|
||||
#
|
||||
# Three kinds of callbacks are supported: before callbacks, run before a
|
||||
# certain event; after callbacks, run after the event; and around callbacks,
|
||||
# blocks that surround the event, triggering it when they yield. Callback code
|
||||
# can be contained in instance methods, procs or lambdas, or callback objects
|
||||
# that respond to certain predetermined methods. See +ClassMethods.set_callback+
|
||||
# for details.
|
||||
#
|
||||
# class Record
|
||||
# include ActiveSupport::Callbacks
|
||||
# define_callbacks :save
|
||||
#
|
||||
# def save
|
||||
# run_callbacks :save do
|
||||
# puts "- save"
|
||||
# end
|
||||
# end
|
||||
# end
|
||||
#
|
||||
# class PersonRecord < Record
|
||||
# set_callback :save, :before, :saving_message
|
||||
# def saving_message
|
||||
# puts "saving..."
|
||||
# end
|
||||
#
|
||||
# set_callback :save, :after do |object|
|
||||
# puts "saved"
|
||||
# end
|
||||
# end
|
||||
#
|
||||
# person = PersonRecord.new
|
||||
# person.save
|
||||
#
|
||||
# Output:
|
||||
# saving...
|
||||
# - save
|
||||
# saved
|
||||
module Callbacks
|
||||
extend Concern
|
||||
|
||||
included do
|
||||
extend ActiveSupport::DescendantsTracker
|
||||
class_attribute :__callbacks, instance_writer: false, default: {}
|
||||
end
|
||||
|
||||
CALLBACK_FILTER_TYPES = [:before, :after, :around]
|
||||
|
||||
# Runs the callbacks for the given event.
|
||||
#
|
||||
# Calls the before and around callbacks in the order they were set, yields
|
||||
# the block (if given one), and then runs the after callbacks in reverse
|
||||
# order.
|
||||
#
|
||||
# If the callback chain was halted, returns +false+. Otherwise returns the
|
||||
# result of the block, +nil+ if no callbacks have been set, or +true+
|
||||
# if callbacks have been set but no block is given.
|
||||
#
|
||||
# run_callbacks :save do
|
||||
# save
|
||||
# end
|
||||
#
|
||||
#--
|
||||
#
|
||||
# As this method is used in many places, and often wraps large portions of
|
||||
# user code, it has an additional design goal of minimizing its impact on
|
||||
# the visible call stack. An exception from inside a :before or :after
|
||||
# callback can be as noisy as it likes -- but when control has passed
|
||||
# smoothly through and into the supplied block, we want as little evidence
|
||||
# as possible that we were here.
|
||||
def run_callbacks(kind)
|
||||
callbacks = __callbacks[kind.to_sym]
|
||||
|
||||
if callbacks.empty?
|
||||
yield if block_given?
|
||||
else
|
||||
env = Filters::Environment.new(self, false, nil)
|
||||
next_sequence = callbacks.compile
|
||||
|
||||
invoke_sequence = Proc.new do
|
||||
skipped = nil
|
||||
while true
|
||||
current = next_sequence
|
||||
current.invoke_before(env)
|
||||
if current.final?
|
||||
env.value = !env.halted && (!block_given? || yield)
|
||||
elsif current.skip?(env)
|
||||
(skipped ||= []) << current
|
||||
next_sequence = next_sequence.nested
|
||||
next
|
||||
else
|
||||
next_sequence = next_sequence.nested
|
||||
begin
|
||||
target, block, method, *arguments = current.expand_call_template(env, invoke_sequence)
|
||||
target.send(method, *arguments, &block)
|
||||
ensure
|
||||
next_sequence = current
|
||||
end
|
||||
end
|
||||
current.invoke_after(env)
|
||||
skipped.pop.invoke_after(env) while skipped && skipped.first
|
||||
break env.value
|
||||
end
|
||||
end
|
||||
|
||||
# Common case: no 'around' callbacks defined
|
||||
if next_sequence.final?
|
||||
next_sequence.invoke_before(env)
|
||||
env.value = !env.halted && (!block_given? || yield)
|
||||
next_sequence.invoke_after(env)
|
||||
env.value
|
||||
else
|
||||
invoke_sequence.call
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# A hook invoked every time a before callback is halted.
|
||||
# This can be overridden in ActiveSupport::Callbacks implementors in order
|
||||
# to provide better debugging/logging.
|
||||
def halted_callback_hook(filter)
|
||||
end
|
||||
|
||||
module Conditionals # :nodoc:
|
||||
class Value
|
||||
def initialize(&block)
|
||||
@block = block
|
||||
end
|
||||
def call(target, value); @block.call(value); end
|
||||
end
|
||||
end
|
||||
|
||||
module Filters
|
||||
Environment = Struct.new(:target, :halted, :value)
|
||||
|
||||
class Before
|
||||
def self.build(callback_sequence, user_callback, user_conditions, chain_config, filter)
|
||||
halted_lambda = chain_config[:terminator]
|
||||
|
||||
if user_conditions.any?
|
||||
halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter)
|
||||
else
|
||||
halting(callback_sequence, user_callback, halted_lambda, filter)
|
||||
end
|
||||
end
|
||||
|
||||
def self.halting_and_conditional(callback_sequence, user_callback, user_conditions, halted_lambda, filter)
|
||||
callback_sequence.before do |env|
|
||||
target = env.target
|
||||
value = env.value
|
||||
halted = env.halted
|
||||
|
||||
if !halted && user_conditions.all? { |c| c.call(target, value) }
|
||||
result_lambda = -> { user_callback.call target, value }
|
||||
env.halted = halted_lambda.call(target, result_lambda)
|
||||
if env.halted
|
||||
target.send :halted_callback_hook, filter
|
||||
end
|
||||
end
|
||||
|
||||
env
|
||||
end
|
||||
end
|
||||
private_class_method :halting_and_conditional
|
||||
|
||||
def self.halting(callback_sequence, user_callback, halted_lambda, filter)
|
||||
callback_sequence.before do |env|
|
||||
target = env.target
|
||||
value = env.value
|
||||
halted = env.halted
|
||||
|
||||
unless halted
|
||||
result_lambda = -> { user_callback.call target, value }
|
||||
env.halted = halted_lambda.call(target, result_lambda)
|
||||
|
||||
if env.halted
|
||||
target.send :halted_callback_hook, filter
|
||||
end
|
||||
end
|
||||
|
||||
env
|
||||
end
|
||||
end
|
||||
private_class_method :halting
|
||||
end
|
||||
|
||||
class After
|
||||
def self.build(callback_sequence, user_callback, user_conditions, chain_config)
|
||||
if chain_config[:skip_after_callbacks_if_terminated]
|
||||
if user_conditions.any?
|
||||
halting_and_conditional(callback_sequence, user_callback, user_conditions)
|
||||
else
|
||||
halting(callback_sequence, user_callback)
|
||||
end
|
||||
else
|
||||
if user_conditions.any?
|
||||
conditional callback_sequence, user_callback, user_conditions
|
||||
else
|
||||
simple callback_sequence, user_callback
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def self.halting_and_conditional(callback_sequence, user_callback, user_conditions)
|
||||
callback_sequence.after do |env|
|
||||
target = env.target
|
||||
value = env.value
|
||||
halted = env.halted
|
||||
|
||||
if !halted && user_conditions.all? { |c| c.call(target, value) }
|
||||
user_callback.call target, value
|
||||
end
|
||||
|
||||
env
|
||||
end
|
||||
end
|
||||
private_class_method :halting_and_conditional
|
||||
|
||||
def self.halting(callback_sequence, user_callback)
|
||||
callback_sequence.after do |env|
|
||||
unless env.halted
|
||||
user_callback.call env.target, env.value
|
||||
end
|
||||
|
||||
env
|
||||
end
|
||||
end
|
||||
private_class_method :halting
|
||||
|
||||
def self.conditional(callback_sequence, user_callback, user_conditions)
|
||||
callback_sequence.after do |env|
|
||||
target = env.target
|
||||
value = env.value
|
||||
|
||||
if user_conditions.all? { |c| c.call(target, value) }
|
||||
user_callback.call target, value
|
||||
end
|
||||
|
||||
env
|
||||
end
|
||||
end
|
||||
private_class_method :conditional
|
||||
|
||||
def self.simple(callback_sequence, user_callback)
|
||||
callback_sequence.after do |env|
|
||||
user_callback.call env.target, env.value
|
||||
|
||||
env
|
||||
end
|
||||
end
|
||||
private_class_method :simple
|
||||
end
|
||||
end
|
||||
|
||||
class Callback #:nodoc:#
|
||||
def self.build(chain, filter, kind, options)
|
||||
if filter.is_a?(String)
|
||||
raise ArgumentError, <<-MSG.squish
|
||||
Passing string to define a callback is not supported. See the `.set_callback`
|
||||
documentation to see supported values.
|
||||
MSG
|
||||
end
|
||||
|
||||
new chain.name, filter, kind, options, chain.config
|
||||
end
|
||||
|
||||
attr_accessor :kind, :name
|
||||
attr_reader :chain_config
|
||||
|
||||
def initialize(name, filter, kind, options, chain_config)
|
||||
@chain_config = chain_config
|
||||
@name = name
|
||||
@kind = kind
|
||||
@filter = filter
|
||||
@key = compute_identifier filter
|
||||
@if = check_conditionals(Array(options[:if]))
|
||||
@unless = check_conditionals(Array(options[:unless]))
|
||||
end
|
||||
|
||||
def filter; @key; end
|
||||
def raw_filter; @filter; end
|
||||
|
||||
def merge_conditional_options(chain, if_option:, unless_option:)
|
||||
options = {
|
||||
if: @if.dup,
|
||||
unless: @unless.dup
|
||||
}
|
||||
|
||||
options[:if].concat Array(unless_option)
|
||||
options[:unless].concat Array(if_option)
|
||||
|
||||
self.class.build chain, @filter, @kind, options
|
||||
end
|
||||
|
||||
def matches?(_kind, _filter)
|
||||
@kind == _kind && filter == _filter
|
||||
end
|
||||
|
||||
def duplicates?(other)
|
||||
case @filter
|
||||
when Symbol
|
||||
matches?(other.kind, other.filter)
|
||||
else
|
||||
false
|
||||
end
|
||||
end
|
||||
|
||||
# Wraps code with filter
|
||||
def apply(callback_sequence)
|
||||
user_conditions = conditions_lambdas
|
||||
user_callback = CallTemplate.build(@filter, self)
|
||||
|
||||
case kind
|
||||
when :before
|
||||
Filters::Before.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config, @filter)
|
||||
when :after
|
||||
Filters::After.build(callback_sequence, user_callback.make_lambda, user_conditions, chain_config)
|
||||
when :around
|
||||
callback_sequence.around(user_callback, user_conditions)
|
||||
end
|
||||
end
|
||||
|
||||
def current_scopes
|
||||
Array(chain_config[:scope]).map { |s| public_send(s) }
|
||||
end
|
||||
|
||||
private
|
||||
def check_conditionals(conditionals)
|
||||
if conditionals.any? { |c| c.is_a?(String) }
|
||||
raise ArgumentError, <<-MSG.squish
|
||||
Passing string to be evaluated in :if and :unless conditional
|
||||
options is not supported. Pass a symbol for an instance method,
|
||||
or a lambda, proc or block, instead.
|
||||
MSG
|
||||
end
|
||||
|
||||
conditionals
|
||||
end
|
||||
|
||||
def compute_identifier(filter)
|
||||
case filter
|
||||
when ::Proc
|
||||
filter.object_id
|
||||
else
|
||||
filter
|
||||
end
|
||||
end
|
||||
|
||||
def conditions_lambdas
|
||||
@if.map { |c| CallTemplate.build(c, self).make_lambda } +
|
||||
@unless.map { |c| CallTemplate.build(c, self).inverted_lambda }
|
||||
end
|
||||
end
|
||||
|
||||
# A future invocation of user-supplied code (either as a callback,
|
||||
# or a condition filter).
|
||||
class CallTemplate # :nodoc:
|
||||
def initialize(target, method, arguments, block)
|
||||
@override_target = target
|
||||
@method_name = method
|
||||
@arguments = arguments
|
||||
@override_block = block
|
||||
end
|
||||
|
||||
# Return the parts needed to make this call, with the given
|
||||
# input values.
|
||||
#
|
||||
# Returns an array of the form:
|
||||
#
|
||||
# [target, block, method, *arguments]
|
||||
#
|
||||
# This array can be used as such:
|
||||
#
|
||||
# target.send(method, *arguments, &block)
|
||||
#
|
||||
# The actual invocation is left up to the caller to minimize
|
||||
# call stack pollution.
|
||||
def expand(target, value, block)
|
||||
result = @arguments.map { |arg|
|
||||
case arg
|
||||
when :value; value
|
||||
when :target; target
|
||||
when :block; block || raise(ArgumentError)
|
||||
end
|
||||
}
|
||||
|
||||
result.unshift @method_name
|
||||
result.unshift @override_block || block
|
||||
result.unshift @override_target || target
|
||||
|
||||
# target, block, method, *arguments = result
|
||||
# target.send(method, *arguments, &block)
|
||||
result
|
||||
end
|
||||
|
||||
# Return a lambda that will make this call when given the input
|
||||
# values.
|
||||
def make_lambda
|
||||
lambda do |target, value, &block|
|
||||
target, block, method, *arguments = expand(target, value, block)
|
||||
target.send(method, *arguments, &block)
|
||||
end
|
||||
end
|
||||
|
||||
# Return a lambda that will make this call when given the input
|
||||
# values, but then return the boolean inverse of that result.
|
||||
def inverted_lambda
|
||||
lambda do |target, value, &block|
|
||||
target, block, method, *arguments = expand(target, value, block)
|
||||
! target.send(method, *arguments, &block)
|
||||
end
|
||||
end
|
||||
|
||||
# Filters support:
|
||||
#
|
||||
# Symbols:: A method to call.
|
||||
# Procs:: A proc to call with the object.
|
||||
# Objects:: An object with a <tt>before_foo</tt> method on it to call.
|
||||
#
|
||||
# All of these objects are converted into a CallTemplate and handled
|
||||
# the same after this point.
|
||||
def self.build(filter, callback)
|
||||
case filter
|
||||
when Symbol
|
||||
new(nil, filter, [], nil)
|
||||
when Conditionals::Value
|
||||
new(filter, :call, [:target, :value], nil)
|
||||
when ::Proc
|
||||
if filter.arity > 1
|
||||
new(nil, :instance_exec, [:target, :block], filter)
|
||||
elsif filter.arity > 0
|
||||
new(nil, :instance_exec, [:target], filter)
|
||||
else
|
||||
new(nil, :instance_exec, [], filter)
|
||||
end
|
||||
else
|
||||
method_to_call = callback.current_scopes.join("_")
|
||||
|
||||
new(filter, method_to_call, [:target], nil)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Execute before and after filters in a sequence instead of
|
||||
# chaining them with nested lambda calls, see:
|
||||
# https://github.com/rails/rails/issues/18011
|
||||
class CallbackSequence # :nodoc:
|
||||
def initialize(nested = nil, call_template = nil, user_conditions = nil)
|
||||
@nested = nested
|
||||
@call_template = call_template
|
||||
@user_conditions = user_conditions
|
||||
|
||||
@before = []
|
||||
@after = []
|
||||
end
|
||||
|
||||
def before(&before)
|
||||
@before.unshift(before)
|
||||
self
|
||||
end
|
||||
|
||||
def after(&after)
|
||||
@after.push(after)
|
||||
self
|
||||
end
|
||||
|
||||
def around(call_template, user_conditions)
|
||||
CallbackSequence.new(self, call_template, user_conditions)
|
||||
end
|
||||
|
||||
def skip?(arg)
|
||||
arg.halted || !@user_conditions.all? { |c| c.call(arg.target, arg.value) }
|
||||
end
|
||||
|
||||
def nested
|
||||
@nested
|
||||
end
|
||||
|
||||
def final?
|
||||
!@call_template
|
||||
end
|
||||
|
||||
def expand_call_template(arg, block)
|
||||
@call_template.expand(arg.target, arg.value, block)
|
||||
end
|
||||
|
||||
def invoke_before(arg)
|
||||
@before.each { |b| b.call(arg) }
|
||||
end
|
||||
|
||||
def invoke_after(arg)
|
||||
@after.each { |a| a.call(arg) }
|
||||
end
|
||||
end
|
||||
|
||||
# Holds the ordered list of callbacks registered for one event and
# lazily compiles them into a CallbackSequence. Every mutation clears
# the memoized compiled form; compilation itself is mutex-protected.
class CallbackChain #:nodoc:
  include Enumerable

  attr_reader :name, :config

  # name   - the event this chain belongs to (e.g. :save).
  # config - options merged over the defaults. Defaults: scope [:kind],
  #          terminator that halts when a callback throws :abort.
  def initialize(name, config)
    @name = name
    defaults = { scope: [:kind], terminator: default_terminator }
    @config = defaults.merge!(config)
    @chain = []
    @callbacks = nil # memoized compiled sequence; nil means "dirty"
    @mutex = Mutex.new
  end

  def each(&block); @chain.each(&block); end
  def index(o); @chain.index(o); end
  def empty?; @chain.empty?; end

  # Inserts a callback at the given position and invalidates the cache.
  def insert(index, o)
    @callbacks = nil
    @chain.insert(index, o)
  end

  def delete(o)
    @callbacks = nil
    @chain.delete(o)
  end

  def clear
    @callbacks = nil
    @chain.clear
    self
  end

  # dup/clone get their own chain array and mutex, with a cold cache.
  def initialize_copy(other)
    @callbacks = nil
    @chain = other.chain.dup
    @mutex = Mutex.new
  end

  # Compiles the chain into a CallbackSequence, caching the result
  # until the chain is mutated again. Callbacks are folded in reverse
  # so the first-registered callback ends up outermost.
  def compile
    @callbacks || @mutex.synchronize do
      seed = CallbackSequence.new
      @callbacks ||= @chain.reverse.inject(seed) do |sequence, cb|
        cb.apply sequence
      end
    end
  end

  def append(*callbacks)
    callbacks.each { |cb| append_one(cb) }
  end

  def prepend(*callbacks)
    callbacks.each { |cb| prepend_one(cb) }
  end

  protected
    def chain; @chain; end

  private
    def append_one(callback)
      @callbacks = nil
      remove_duplicates(callback)
      @chain.push(callback)
    end

    def prepend_one(callback)
      @callbacks = nil
      remove_duplicates(callback)
      @chain.unshift(callback)
    end

    # Re-registering a callback replaces its previous occurrence.
    def remove_duplicates(callback)
      @callbacks = nil
      @chain.delete_if { |existing| callback.duplicates?(existing) }
    end

    # Default chain terminator: halt when the callback throws :abort.
    def default_terminator
      Proc.new do |target, result_lambda|
        halted = true
        catch(:abort) do
          result_lambda.call
          halted = false
        end
        halted
      end
    end
end
|
||||
|
||||
module ClassMethods
  # Splits a raw filter list into [type, filters, options]. The first
  # element is taken as the callback type when it is a recognised
  # filter type, otherwise :before is assumed. A trailing block counts
  # as one more filter.
  def normalize_callback_params(filters, block) # :nodoc:
    type =
      if CALLBACK_FILTER_TYPES.include?(filters.first)
        filters.shift
      else
        :before
      end
    options = filters.extract_options!
    filters.unshift(block) if block
    [type, filters, options.dup]
  end

  # This is used internally to append, prepend and skip callbacks to the
  # CallbackChain: yields each affected class (self plus all
  # descendants, innermost first) together with a copy of its chain.
  def __update_callbacks(name) #:nodoc:
    affected = [self] + ActiveSupport::DescendantsTracker.descendants(self)
    affected.reverse_each do |target|
      yield target, target.get_callbacks(name).dup
    end
  end

  # Install a callback for the given event.
  #
  #   set_callback :save, :before, :before_method
  #   set_callback :save, :after, :after_method, if: :condition
  #   set_callback :save, :around, ->(r, block) { stuff; result = block.call; stuff }
  #
  # The second argument selects +:before+ (the default when omitted),
  # +:after+ or +:around+. The callback itself may be a symbol naming
  # an instance method, a proc/lambda/block (evaluated in the context
  # of the current object, optionally receiving it as an argument), or
  # an object responding to the method determined by the chain's
  # <tt>:scope</tt>. Before and around callbacks run in registration
  # order; after callbacks run in reverse.
  #
  # ===== Options
  #
  # * <tt>:if</tt> - symbol(s)/proc(s) that must all return true for
  #   the callback to run.
  # * <tt>:unless</tt> - symbol(s)/proc(s) that must all return false.
  # * <tt>:prepend</tt> - if true, prepend to the chain instead of
  #   appending.
  def set_callback(name, *filter_list, &block)
    type, filters, options = normalize_callback_params(filter_list, block)

    own_chain = get_callbacks name
    built = filters.map do |filter|
      Callback.build(own_chain, filter, type, options)
    end

    __update_callbacks(name) do |target, chain|
      options[:prepend] ? chain.prepend(*built) : chain.append(*built)
      target.set_callbacks name, chain
    end
  end

  # Skip a previously set callback. Like +set_callback+, <tt>:if</tt>
  # or <tt>:unless</tt> options may be passed to control when the
  # callback is skipped. Raises ArgumentError when the callback has not
  # been set, unless <tt>raise: false</tt> is passed.
  #
  #   class Writer < Person
  #     skip_callback :validate, :before, :check_membership, if: -> { age > 18 }
  #   end
  def skip_callback(name, *filter_list, &block)
    type, filters, options = normalize_callback_params(filter_list, block)

    options[:raise] = true unless options.key?(:raise)

    __update_callbacks(name) do |target, chain|
      filters.each do |filter|
        found = chain.find { |c| c.matches?(type, filter) }

        if !found && options[:raise]
          raise ArgumentError, "#{type.to_s.capitalize} #{name} callback #{filter.inspect} has not been defined"
        end

        if found && (options.key?(:if) || options.key?(:unless))
          # Conditional skip: replace the callback with a version that
          # carries the extra :if/:unless guards.
          conditional = found.merge_conditional_options(chain, if_option: options[:if], unless_option: options[:unless])
          chain.insert(chain.index(found), conditional)
        end

        chain.delete(found)
      end
      target.set_callbacks name, chain
    end
  end

  # Remove all set callbacks for the given event, on this class and on
  # every descendant.
  def reset_callbacks(name)
    callbacks = get_callbacks name

    ActiveSupport::DescendantsTracker.descendants(self).each do |target|
      descendant_chain = target.get_callbacks(name).dup
      callbacks.each { |c| descendant_chain.delete(c) }
      target.set_callbacks name, descendant_chain
    end

    set_callbacks(name, callbacks.dup.clear)
  end

  # Define sets of events in the object life cycle that support
  # callbacks.
  #
  #   define_callbacks :validate
  #   define_callbacks :initialize, :save, :destroy
  #
  # ===== Options
  #
  # * <tt>:terminator</tt> - a lambda, given the target object and the
  #   callback's result lambda, that decides whether a before filter
  #   halts the chain (preventing later before/around callbacks and the
  #   event itself). The default terminator halts when a callback
  #   throws +:abort+.
  #
  #     define_callbacks :validate, terminator: ->(target, result_lambda) { result_lambda.call == false }
  #
  # * <tt>:skip_after_callbacks_if_terminated</tt> - whether after
  #   callbacks are also skipped on termination. Off by default; no
  #   effect when <tt>:terminator</tt> is nil.
  # * <tt>:scope</tt> - which method(s) an object callback responds to.
  #   With <tt>scope: [:kind, :name]</tt> the method called is built by
  #   interpolating the callback kind (before/after/around) and the
  #   event name, e.g. +before_save+; <tt>scope: [:name]</tt> would
  #   call +save+.
  #
  # ===== Notes
  #
  # +names+ must not end with <tt>!</tt>, <tt>?</tt> or <tt>=</tt>.
  # Calling +define_callbacks+ again with the same +names+ overwrites
  # callbacks previously registered with +set_callback+.
  def define_callbacks(*names)
    options = names.extract_options!

    names.each do |name|
      name = name.to_sym

      set_callbacks name, CallbackChain.new(name, options)

      module_eval <<-RUBY, __FILE__, __LINE__ + 1
        def _run_#{name}_callbacks(&block)
          run_callbacks #{name.inspect}, &block
        end

        def self._#{name}_callbacks
          get_callbacks(#{name.inspect})
        end

        def self._#{name}_callbacks=(value)
          set_callbacks(#{name.inspect}, value)
        end

        def _#{name}_callbacks
          __callbacks[#{name.inspect}]
        end
      RUBY
    end
  end

  protected
    def get_callbacks(name) # :nodoc:
      __callbacks[name.to_sym]
    end

    def set_callbacks(name, callbacks) # :nodoc:
      self.__callbacks = __callbacks.merge(name.to_sym => callbacks)
    end
end
|
||||
end
|
||||
end
|
@ -0,0 +1,144 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
  # ActiveSupport::Concern streamlines the common "self.included hook +
  # ClassMethods sub-module" pattern:
  #
  #   require 'active_support/concern'
  #
  #   module M
  #     extend ActiveSupport::Concern
  #
  #     included do
  #       scope :disabled, -> { where(disabled: true) }
  #     end
  #
  #     class_methods do
  #       ...
  #     end
  #   end
  #
  # It also resolves module dependencies gracefully: when a concern
  # includes another concern, that inclusion is deferred and replayed on
  # the final (non-concern) base, so the +included+ hooks of a
  # dependency see the real host class instead of the intermediate
  # module:
  #
  #   module Foo
  #     extend ActiveSupport::Concern
  #     included do
  #       def self.method_injected_by_foo
  #         ...
  #       end
  #     end
  #   end
  #
  #   module Bar
  #     extend ActiveSupport::Concern
  #     include Foo
  #
  #     included do
  #       self.method_injected_by_foo
  #     end
  #   end
  #
  #   class Host
  #     include Bar # Bar takes care of its dependency on Foo
  #   end
  module Concern
    # Raised when a concern declares two `included` blocks.
    class MultipleIncludedBlocks < StandardError #:nodoc:
      def initialize
        super "Cannot define multiple 'included' blocks for a Concern"
      end
    end

    def self.extended(base) #:nodoc:
      # Every concern tracks the concerns it depends on.
      base.instance_variable_set(:@_dependencies, [])
    end

    def append_features(base)
      if base.instance_variable_defined?(:@_dependencies)
        # base is itself a concern: just record self as a dependency and
        # defer the actual inclusion until a real class includes it.
        base.instance_variable_get(:@_dependencies) << self
        false
      else
        return false if base < self
        @_dependencies.each { |dependency| base.include(dependency) }
        super
        base.extend const_get(:ClassMethods) if const_defined?(:ClassMethods)
        base.class_eval(&@_included_block) if instance_variable_defined?(:@_included_block)
      end
    end

    # With a block: store it to be class_eval'd on the including class.
    # Without a block (normal `included` hook): defer to Module#included.
    def included(base = nil, &block)
      if base.nil?
        raise MultipleIncludedBlocks if instance_variable_defined?(:@_included_block)

        @_included_block = block
      else
        super
      end
    end

    # Evaluates the block inside this concern's ClassMethods module,
    # creating it on first use.
    def class_methods(&class_methods_module_definition)
      mod =
        if const_defined?(:ClassMethods, false)
          const_get(:ClassMethods)
        else
          const_set(:ClassMethods, Module.new)
        end

      mod.module_eval(&class_methods_module_definition)
    end
  end
end
|
@ -0,0 +1,150 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/concern"
|
||||
require "active_support/ordered_options"
|
||||
require "active_support/core_ext/array/extract_options"
|
||||
require "active_support/core_ext/regexp"
|
||||
|
||||
module ActiveSupport
  # Configurable provides a <tt>config</tt> method to store and retrieve
  # configuration options as an <tt>OrderedHash</tt>.
  module Configurable
    extend ActiveSupport::Concern

    # Per-class configuration store. Reader methods can be compiled so
    # lookups avoid going through method_missing.
    class Configuration < ActiveSupport::InheritableOptions
      def compile_methods!
        self.class.compile_methods!(keys)
      end

      # Compiles reader methods so we don't have to go through method_missing.
      def self.compile_methods!(keys)
        keys.reject { |m| method_defined?(m) }.each do |key|
          class_eval <<-RUBY, __FILE__, __LINE__ + 1
            def #{key}; _get(#{key.inspect}); end
          RUBY
        end
      end
    end

    module ClassMethods
      # The class-level configuration, built lazily as an inheritable
      # copy of the superclass' config when that is configurable too.
      def config
        @_config ||= if respond_to?(:superclass) && superclass.respond_to?(:config)
          superclass.config.inheritable_copy
        else
          # create a new "anonymous" class that will host the compiled reader methods
          Class.new(Configuration).new
        end
      end

      def configure
        yield config
      end

      # Defines shortcut accessors that proxy straight to +config+, so
      # you don't have to go through <tt>config.attr</tt>. Both class
      # and instance accessors are defined:
      #
      #   class User
      #     include ActiveSupport::Configurable
      #     config_accessor :allowed_access
      #   end
      #
      #   User.allowed_access = false
      #   User.allowed_access       # => false
      #   User.new.allowed_access   # => false
      #
      # The attribute name must be a valid Ruby method name, otherwise
      # NameError is raised.
      #
      # Pass <tt>instance_reader: false</tt> / <tt>instance_writer: false</tt>
      # to omit the corresponding instance method, or
      # <tt>instance_accessor: false</tt> to omit both. A block supplies
      # a default value:
      #
      #   config_accessor(:hair_colors) { [:brown, :black, :blonde, :red] }
      def config_accessor(*names)
        options = names.extract_options!

        names.each do |name|
          raise NameError.new("invalid config attribute name") unless /\A[_A-Za-z]\w*\z/.match?(name)

          reader, reader_line = "def #{name}; config.#{name}; end", __LINE__
          writer, writer_line = "def #{name}=(value); config.#{name} = value; end", __LINE__

          singleton_class.class_eval reader, __FILE__, reader_line
          singleton_class.class_eval writer, __FILE__, writer_line

          unless options[:instance_accessor] == false
            class_eval reader, __FILE__, reader_line unless options[:instance_reader] == false
            class_eval writer, __FILE__, writer_line unless options[:instance_writer] == false
          end
          send("#{name}=", yield) if block_given?
        end
      end
      private :config_accessor
    end

    # Reads and writes attributes on this instance's configuration,
    # which starts out as an inheritable copy of the class-level config:
    #
    #   user = User.new
    #   user.config.allowed_access = true
    #   user.config.allowed_access # => true
    def config
      @_config ||= self.class.config.inheritable_copy
    end
  end
end
|
@ -0,0 +1,5 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
# Eagerly require every core extension shipped alongside this file.
Dir.glob(File.expand_path("core_ext/*.rb", __dir__)) do |extension|
  require extension
end
|
@ -0,0 +1,195 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
  # Abstract super class that provides a thread-isolated attributes
  # singleton, reset automatically before and after each request. This
  # makes per-request globals (current user, account, request details)
  # easily available to the whole system without deep parameter passing:
  #
  #   # app/models/current.rb
  #   class Current < ActiveSupport::CurrentAttributes
  #     attribute :account, :user
  #     attribute :request_id, :user_agent, :ip_address
  #
  #     resets { Time.zone = nil }
  #
  #     def user=(user)
  #       super
  #       self.account = user.account
  #       Time.zone = user.time_zone
  #     end
  #   end
  #
  # A word of caution: it's easy to overdo a global singleton like
  # Current and tangle your model as a result. Keep it for a few
  # top-level globals used by more or less all actions on all requests;
  # controller-specific attributes in here quickly become a mess.
  class CurrentAttributes
    include ActiveSupport::Callbacks
    define_callbacks :reset

    class << self
      # Returns singleton instance for this class in this thread. If none exists, one is created.
      def instance
        current_instances[name] ||= new
      end

      # Declares one or more attributes that will be given both class and instance accessor methods.
      def attribute(*names)
        generated_attribute_methods.module_eval do
          names.each do |attr_name|
            define_method(attr_name) do
              attributes[attr_name.to_sym]
            end

            define_method("#{attr_name}=") do |value|
              attributes[attr_name.to_sym] = value
            end
          end
        end

        names.each do |attr_name|
          define_singleton_method(attr_name) do
            instance.public_send(attr_name)
          end

          define_singleton_method("#{attr_name}=") do |value|
            instance.public_send("#{attr_name}=", value)
          end
        end
      end

      # Calls this block after #reset is called on the instance. Used
      # for resetting external collaborators, like Time.zone.
      def resets(&block)
        set_callback :reset, :after, &block
      end

      delegate :set, :reset, to: :instance

      def reset_all # :nodoc:
        current_instances.each_value(&:reset)
      end

      def clear_all # :nodoc:
        reset_all
        current_instances.clear
      end

      private
        # Attribute methods live in an included module so subclasses can
        # override them and call super.
        def generated_attribute_methods
          @generated_attribute_methods ||= Module.new.tap { |mod| include mod }
        end

        def current_instances
          Thread.current[:current_attributes_instances] ||= {}
        end

        def method_missing(name, *args, &block)
          # Caches the method definition as a singleton method of the receiver.
          #
          # By letting #delegate handle it, we avoid an enclosure that'll capture args.
          singleton_class.delegate name, to: :instance

          send(name, *args, &block)
        end
    end

    attr_accessor :attributes

    def initialize
      @attributes = {}
    end

    # Expose one or more attributes within a block. Old values are
    # restored after the block concludes. Handy for setting Current
    # attributes outside the request cycle:
    #
    #   class Chat::PublicationJob < ApplicationJob
    #     def perform(attributes, room_number, creator)
    #       Current.set(person: creator) do
    #         Chat::Publisher.publish(attributes: attributes, room_number: room_number)
    #       end
    #     end
    #   end
    def set(set_attributes)
      previous = compute_attributes(set_attributes.keys)
      assign_attributes(set_attributes)
      yield
    ensure
      assign_attributes(previous)
    end

    # Reset all attributes. Should be called before and after actions, when used as a per-request singleton.
    def reset
      run_callbacks :reset do
        self.attributes = {}
      end
    end

    private
      def assign_attributes(new_attributes)
        new_attributes.each { |key, value| public_send("#{key}=", value) }
      end

      def compute_attributes(keys)
        keys.collect { |key| [ key, public_send(key) ] }.to_h
      end
  end
end
|
@ -0,0 +1,62 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
  # This module provides an internal implementation to track descendants
  # of any class that extends it, which is much faster than iterating
  # through ObjectSpace.
  module DescendantsTracker
    @@direct_descendants = {}

    class << self
      def direct_descendants(klass)
        @@direct_descendants[klass] || []
      end

      # All descendants, depth-first: direct children followed by each
      # child's own descendants.
      def descendants(klass)
        collected = []
        accumulate_descendants(klass, collected)
        collected
      end

      # Drops autoloaded classes when AS::Dependencies is loaded,
      # otherwise forgets everything. Used on code reload.
      def clear
        if defined? ActiveSupport::Dependencies
          @@direct_descendants.each do |klass, descendants|
            if ActiveSupport::Dependencies.autoloaded?(klass)
              @@direct_descendants.delete(klass)
            else
              descendants.reject! { |v| ActiveSupport::Dependencies.autoloaded?(v) }
            end
          end
        else
          @@direct_descendants.clear
        end
      end

      # This is the only method that is not thread safe, but is only ever called
      # during the eager loading phase.
      def store_inherited(klass, descendant)
        (@@direct_descendants[klass] ||= []) << descendant
      end

      private
        def accumulate_descendants(klass, acc)
          children = @@direct_descendants[klass]
          return unless children
          acc.concat(children)
          children.each { |child| accumulate_descendants(child, acc) }
        end
    end

    # Hooked via `extend DescendantsTracker`: records each new subclass.
    def inherited(base)
      DescendantsTracker.store_inherited(self, base)
      super
    end

    def direct_descendants
      DescendantsTracker.direct_descendants(self)
    end

    def descendants
      DescendantsTracker.descendants(self)
    end
  end
end
|
@ -0,0 +1,20 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
  # Wraps a configurable hash function (MD5 by default) behind one
  # hexdigest entry point whose output is capped at 32 hex characters,
  # so swapping in e.g. SHA256 keeps digests the same length.
  class Digest #:nodoc:
    class << self
      def hash_digest_class
        @hash_digest_class ||= ::Digest::MD5
      end

      # klass must respond to .hexdigest (duck-typed, not ancestry-checked).
      def hash_digest_class=(klass)
        unless klass.respond_to?(:hexdigest)
          raise ArgumentError, "#{klass} is expected to implement hexdigest class method"
        end
        @hash_digest_class = klass
      end

      def hexdigest(arg)
        hash_digest_class.hexdigest(arg)[0...32]
      end
    end
  end
end
|
@ -0,0 +1,49 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "yaml"
|
||||
require "active_support/encrypted_file"
|
||||
require "active_support/ordered_options"
|
||||
require "active_support/core_ext/object/inclusion"
|
||||
require "active_support/core_ext/module/delegation"
|
||||
|
||||
module ActiveSupport
  # Reads and writes an encrypted YAML configuration file (e.g. Rails
  # credentials) and exposes the parsed settings as a hash (#config,
  # #[], #fetch) and via method-style access (delegate_missing_to).
  class EncryptedConfiguration < EncryptedFile
    delegate :[], :fetch, to: :config
    delegate_missing_to :options

    def initialize(config_path:, key_path:, env_key:, raise_if_missing_key:)
      super content_path: config_path, key_path: key_path,
        env_key: env_key, raise_if_missing_key: raise_if_missing_key
    end

    # Allow a config to be started without a file present
    def read
      super
    rescue ActiveSupport::EncryptedFile::MissingContentError
      ""
    end

    # Parses the contents first so invalid YAML fails before anything is
    # encrypted and written to disk.
    def write(contents)
      deserialize(contents)

      super
    end

    def config
      @config ||= deserialize(read).deep_symbolize_keys
    end

    private
      def options
        @options ||= ActiveSupport::InheritableOptions.new(config)
      end

      def serialize(payload)
        payload.present? ? YAML.dump(payload) : ""
      end

      # NOTE(review): YAML.load (not safe_load) is tolerable here only
      # because the decrypted content is authored by the app itself, not
      # untrusted input — confirm that assumption holds for all callers.
      def deserialize(payload)
        YAML.load(payload).presence || {}
      end
  end
end
|
@ -0,0 +1,99 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "pathname"
|
||||
require "active_support/message_encryptor"
|
||||
|
||||
module ActiveSupport
|
||||
class EncryptedFile
|
||||
class MissingContentError < RuntimeError
|
||||
def initialize(content_path)
|
||||
super "Missing encrypted content file in #{content_path}."
|
||||
end
|
||||
end
|
||||
|
||||
class MissingKeyError < RuntimeError
|
||||
def initialize(key_path:, env_key:)
|
||||
super \
|
||||
"Missing encryption key to decrypt file with. " +
|
||||
"Ask your team for your master key and write it to #{key_path} or put it in the ENV['#{env_key}']."
|
||||
end
|
||||
end
|
||||
|
||||
CIPHER = "aes-128-gcm"
|
||||
|
||||
def self.generate_key
|
||||
SecureRandom.hex(ActiveSupport::MessageEncryptor.key_len(CIPHER))
|
||||
end
|
||||
|
||||
|
||||
attr_reader :content_path, :key_path, :env_key, :raise_if_missing_key
|
||||
|
||||
def initialize(content_path:, key_path:, env_key:, raise_if_missing_key:)
|
||||
@content_path, @key_path = Pathname.new(content_path), Pathname.new(key_path)
|
||||
@env_key, @raise_if_missing_key = env_key, raise_if_missing_key
|
||||
end
|
||||
|
||||
def key
|
||||
read_env_key || read_key_file || handle_missing_key
|
||||
end
|
||||
|
||||
def read
|
||||
if !key.nil? && content_path.exist?
|
||||
decrypt content_path.binread
|
||||
else
|
||||
raise MissingContentError, content_path
|
||||
end
|
||||
end
|
||||
|
||||
def write(contents)
|
||||
IO.binwrite "#{content_path}.tmp", encrypt(contents)
|
||||
FileUtils.mv "#{content_path}.tmp", content_path
|
||||
end
|
||||
|
||||
def change(&block)
|
||||
writing read, &block
|
||||
end
|
||||
|
||||
|
||||
private
|
||||
def writing(contents)
|
||||
tmp_file = "#{Process.pid}.#{content_path.basename.to_s.chomp('.enc')}"
|
||||
tmp_path = Pathname.new File.join(Dir.tmpdir, tmp_file)
|
||||
tmp_path.binwrite contents
|
||||
|
||||
yield tmp_path
|
||||
|
||||
updated_contents = tmp_path.binread
|
||||
|
||||
write(updated_contents) if updated_contents != contents
|
||||
ensure
|
||||
FileUtils.rm(tmp_path) if tmp_path.exist?
|
||||
end
|
||||
|
||||
|
||||
def encrypt(contents)
|
||||
encryptor.encrypt_and_sign contents
|
||||
end
|
||||
|
||||
def decrypt(contents)
|
||||
encryptor.decrypt_and_verify contents
|
||||
end
|
||||
|
||||
def encryptor
|
||||
@encryptor ||= ActiveSupport::MessageEncryptor.new([ key ].pack("H*"), cipher: CIPHER)
|
||||
end
|
||||
|
||||
|
||||
def read_env_key
|
||||
ENV[env_key]
|
||||
end
|
||||
|
||||
def read_key_file
|
||||
key_path.binread.strip if key_path.exist?
|
||||
end
|
||||
|
||||
def handle_missing_key
|
||||
raise MissingKeyError, key_path: key_path, env_key: env_key if raise_if_missing_key
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,205 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "set"
|
||||
require "pathname"
|
||||
require "concurrent/atomic/atomic_boolean"
|
||||
|
||||
module ActiveSupport
  # Allows you to "listen" to changes in a file system.
  # The evented file updater does not hit disk when checking for updates,
  # instead it uses platform specific file system events to trigger a change
  # in state.
  #
  # The file checker takes an array of files to watch or a hash specifying directories
  # and file extensions to watch. It also takes a block that is called when
  # EventedFileUpdateChecker#execute is run or when EventedFileUpdateChecker#execute_if_updated
  # is run and there have been changes to the file system.
  #
  # Note: Forking will cause the first call to `updated?` to return `true`.
  #
  # Example:
  #
  #   checker = ActiveSupport::EventedFileUpdateChecker.new(["/tmp/foo"]) { puts "changed" }
  #   checker.updated?
  #   # => false
  #   checker.execute_if_updated
  #   # => nil
  #
  #   FileUtils.touch("/tmp/foo")
  #
  #   checker.updated?
  #   # => true
  #   checker.execute_if_updated
  #   # => "changed"
  #
  class EventedFileUpdateChecker #:nodoc: all
    def initialize(files, dirs = {}, &block)
      unless block
        raise ArgumentError, "A block is required to initialize an EventedFileUpdateChecker"
      end

      @ph = PathHelper.new
      @files = files.map { |f| @ph.xpath(f) }.to_set

      # Normalize the dirs hash: expanded Pathname => array of bare extensions.
      @dirs = {}
      dirs.each do |dir, exts|
        @dirs[@ph.xpath(dir)] = Array(exts).map { |ext| @ph.normalize_extension(ext) }
      end

      @block = block
      @updated = Concurrent::AtomicBoolean.new(false)
      @lcsp = @ph.longest_common_subpath(@dirs.keys)
      @pid = Process.pid
      @boot_mutex = Mutex.new

      if (@dtw = directories_to_watch).any?
        # Loading listen triggers warnings. These are originated by a legit
        # usage of attr_* macros for private attributes, but adds a lot of noise
        # to our test suite. Thus, we lazy load it and disable warnings locally.
        silence_warnings do
          begin
            require "listen"
          rescue LoadError => e
            raise LoadError, "Could not load the 'listen' gem. Add `gem 'listen'` to the development group of your Gemfile", e.backtrace
          end
        end
      end
      boot!
    end

    # True when a watched file changed since the last +execute+. Detects a
    # fork (pid change) and re-boots the listener, reporting updated.
    def updated?
      @boot_mutex.synchronize do
        if @pid != Process.pid
          boot!
          @pid = Process.pid
          @updated.make_true
        end
      end
      @updated.true?
    end

    # Clears the updated flag and invokes the block unconditionally.
    def execute
      @updated.make_false
      @block.call
    end

    # Runs +execute+ (and any given block first) only when there were updates.
    def execute_if_updated
      if updated?
        yield if block_given?
        execute
        true
      end
    end

    private
      # Starts a background listener that funnels events into #changed.
      def boot!
        Listen.to(*@dtw, &method(:changed)).start
      end

      # Listener callback; flips the flag if any event touches a watched path.
      def changed(modified, added, removed)
        unless updated?
          @updated.make_true if (modified + added + removed).any? { |f| watching?(f) }
        end
      end

      # True if +file+ is explicitly watched, or lives under a watched dir
      # with a watched extension (search stops at the common root or "/").
      def watching?(file)
        file = @ph.xpath(file)

        if @files.member?(file)
          true
        elsif file.directory?
          false
        else
          ext = @ph.normalize_extension(file.extname)

          file.dirname.ascend do |dir|
            if @dirs.fetch(dir, []).include?(ext)
              break true
            elsif dir == @lcsp || dir.root?
              break false
            end
          end
        end
      end

      # Deepest existing parents of all watched paths, minus anything inside
      # a gem path, with descendant directories filtered out.
      def directories_to_watch
        dtw = (@files + @dirs.keys).map { |f| @ph.existing_parent(f) }
        dtw.compact!
        dtw.uniq!

        normalized_gem_paths = Gem.path.map { |path| File.join path, "" }
        dtw = dtw.reject do |path|
          normalized_gem_paths.any? { |gem_path| path.to_s.start_with?(gem_path) }
        end

        @ph.filter_out_descendants(dtw)
      end

      class PathHelper
        # Expanded absolute Pathname for +path+.
        def xpath(path)
          Pathname.new(path).expand_path
        end

        # ".rb"/:rb/"rb" all normalize to "rb".
        def normalize_extension(ext)
          ext.to_s.sub(/\A\./, "")
        end

        # Given a collection of Pathname objects returns the longest subpath
        # common to all of them, or +nil+ if there is none.
        def longest_common_subpath(paths)
          return if paths.empty?

          lcsp = Pathname.new(paths[0])

          paths[1..-1].each do |path|
            until ascendant_of?(lcsp, path)
              if lcsp.root?
                # If we get here a root directory is not an ascendant of path.
                # This may happen if there are paths in different drives on
                # Windows.
                return
              else
                lcsp = lcsp.parent
              end
            end
          end

          lcsp
        end

        # Returns the deepest existing ascendant, which could be the argument itself.
        def existing_parent(dir)
          dir.ascend do |ascendant|
            break ascendant if ascendant.directory?
          end
        end

        # Filters out directories which are descendants of others in the collection (stable).
        def filter_out_descendants(dirs)
          return dirs if dirs.length < 2

          dirs_sorted_by_nparts = dirs.sort_by { |dir| dir.each_filename.to_a.length }
          descendants = []

          until dirs_sorted_by_nparts.empty?
            dir = dirs_sorted_by_nparts.shift

            dirs_sorted_by_nparts.reject! do |possible_descendant|
              ascendant_of?(dir, possible_descendant) && descendants << possible_descendant
            end
          end

          # Array#- preserves order.
          dirs - descendants
        end

        private

          # True when +base+ is a strict ancestor directory of +other+.
          def ascendant_of?(base, other)
            base != other && other.ascend do |ascendant|
              break true if base == ascendant
            end
          end
      end
  end
end
|
@ -0,0 +1,128 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/callbacks"
|
||||
|
||||
module ActiveSupport
  # Wraps a unit of work so registered callbacks fire around it: +run+
  # callbacks before, +complete+ callbacks after. Subclasses (e.g.
  # Executor/Reloader) get their own independent callback chains.
  class ExecutionWrapper
    include ActiveSupport::Callbacks

    # Returned by run! when an execution is already active on this thread;
    # its complete! is a no-op so nesting is safe.
    Null = Object.new # :nodoc:
    def Null.complete! # :nodoc:
    end

    define_callbacks :run
    define_callbacks :complete

    # Registers a callback to run at the start of an execution.
    def self.to_run(*args, &block)
      set_callback(:run, *args, &block)
    end

    # Registers a callback to run at the end of an execution.
    def self.to_complete(*args, &block)
      set_callback(:complete, *args, &block)
    end

    # Adapter that invokes hook.run and stashes its return value per-instance.
    RunHook = Struct.new(:hook) do # :nodoc:
      def before(target)
        hook_state = target.send(:hook_state)
        hook_state[hook] = hook.run
      end
    end

    # Adapter that invokes hook.complete with the stashed run value, but
    # only when the run side actually executed.
    CompleteHook = Struct.new(:hook) do # :nodoc:
      def before(target)
        hook_state = target.send(:hook_state)
        if hook_state.key?(hook)
          hook.complete hook_state[hook]
        end
      end
      alias after before
    end

    # Register an object to be invoked during both the +run+ and
    # +complete+ steps.
    #
    # +hook.complete+ will be passed the value returned from +hook.run+,
    # and will only be invoked if +run+ has previously been called.
    # (Mostly, this means it won't be invoked if an exception occurs in
    # a preceding +to_run+ block; all ordinary +to_complete+ blocks are
    # invoked in that situation.)
    def self.register_hook(hook, outer: false)
      if outer
        to_run RunHook.new(hook), prepend: true
        to_complete :after, CompleteHook.new(hook)
      else
        to_run RunHook.new(hook)
        to_complete CompleteHook.new(hook)
      end
    end

    # Run this execution.
    #
    # Returns an instance, whose +complete!+ method *must* be invoked
    # after the work has been performed.
    #
    # Where possible, prefer +wrap+.
    def self.run!
      if active?
        Null
      else
        new.tap do |instance|
          success = nil
          begin
            instance.run!
            success = true
          ensure
            # Undo the run callbacks if they raised part-way through.
            instance.complete! unless success
          end
        end
      end
    end

    # Perform the work in the supplied block as an execution.
    def self.wrap
      return yield if active?

      instance = run!
      begin
        yield
      ensure
        instance.complete!
      end
    end

    class << self # :nodoc:
      attr_accessor :active
    end

    def self.inherited(other) # :nodoc:
      super
      other.active = Concurrent::Hash.new
    end

    self.active = Concurrent::Hash.new

    # Truthy when an execution is in flight on the current thread.
    def self.active? # :nodoc:
      @active[Thread.current]
    end

    def run! # :nodoc:
      self.class.active[Thread.current] = true
      run_callbacks(:run)
    end

    # Complete this in-flight execution. This method *must* be called
    # exactly once on the result of any call to +run!+.
    #
    # Where possible, prefer +wrap+.
    def complete!
      run_callbacks(:complete)
    ensure
      self.class.active.delete Thread.current
    end

    private
      # Per-instance storage for values returned by registered hooks' run
      # step, consumed by the matching complete step.
      def hook_state
        @_hook_state ||= {}
      end
  end
end
|
@ -0,0 +1,8 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/execution_wrapper"
|
||||
|
||||
module ActiveSupport
  # The framework-level execution wrapper. The body is intentionally empty:
  # subclassing ExecutionWrapper gives Executor its own run/complete
  # callback chains, separate from any other wrapper subclass.
  class Executor < ExecutionWrapper
  end
end
|
@ -0,0 +1,163 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/core_ext/time/calculations"
|
||||
|
||||
module ActiveSupport
  # FileUpdateChecker specifies the API used by Rails to watch files
  # and control reloading. The API depends on four methods:
  #
  # * +initialize+ which expects two parameters and one block as
  #   described below.
  #
  # * +updated?+ which returns a boolean if there were updates in
  #   the filesystem or not.
  #
  # * +execute+ which executes the given block on initialization
  #   and updates the latest watched files and timestamp.
  #
  # * +execute_if_updated+ which just executes the block if it was updated.
  #
  # After initialization, a call to +execute_if_updated+ must execute
  # the block only if there was really a change in the filesystem.
  #
  # This class is used by Rails to reload the I18n framework whenever
  # they are changed upon a new request.
  #
  #   i18n_reloader = ActiveSupport::FileUpdateChecker.new(paths) do
  #     I18n.reload!
  #   end
  #
  #   ActiveSupport::Reloader.to_prepare do
  #     i18n_reloader.execute_if_updated
  #   end
  class FileUpdateChecker
    # It accepts two parameters on initialization. The first is an array
    # of files and the second is an optional hash of directories. The hash must
    # have directories as keys and the value is an array of extensions to be
    # watched under that directory.
    #
    # This method must also receive a block that will be called once a path
    # changes. The array of files and list of directories cannot be changed
    # after FileUpdateChecker has been initialized.
    def initialize(files, dirs = {}, &block)
      unless block
        raise ArgumentError, "A block is required to initialize a FileUpdateChecker"
      end

      @files = files.freeze
      @glob = compile_glob(dirs)
      @block = block

      # Caches populated by updated?; cleared again by execute.
      @watched = nil
      @updated_at = nil

      # Baseline snapshot against which updated? compares.
      @last_watched = watched
      @last_update_at = updated_at(@last_watched)
    end

    # Check if any of the entries were updated. If so, the watched and/or
    # updated_at values are cached until the block is executed via +execute+
    # or +execute_if_updated+.
    def updated?
      current_watched = watched
      if @last_watched.size != current_watched.size
        # A file appeared or disappeared; no need to compare mtimes.
        @watched = current_watched
        true
      else
        current_updated_at = updated_at(current_watched)
        if @last_update_at < current_updated_at
          @watched = current_watched
          @updated_at = current_updated_at
          true
        else
          false
        end
      end
    end

    # Executes the given block and updates the latest watched files and
    # timestamp.
    def execute
      @last_watched = watched
      @last_update_at = updated_at(@last_watched)
      @block.call
    ensure
      # Drop the caches so the next updated? call re-scans the disk.
      @watched = nil
      @updated_at = nil
    end

    # Execute the block given if updated.
    def execute_if_updated
      if updated?
        yield if block_given?
        execute
        true
      else
        false
      end
    end

    private

      # List of currently existing watched files (explicit files plus glob
      # matches), or the cached list when updated? already computed it.
      def watched
        @watched || begin
          all = @files.select { |f| File.exist?(f) }
          all.concat(Dir[@glob]) if @glob
          all
        end
      end

      # Newest mtime among +paths+ (cached value takes precedence);
      # Time.at(0) when there are no paths.
      def updated_at(paths)
        @updated_at || max_mtime(paths) || Time.at(0)
      end

      # This method returns the maximum mtime of the files in +paths+, or +nil+
      # if the array is empty.
      #
      # Files with a mtime in the future are ignored. Such abnormal situation
      # can happen for example if the user changes the clock by hand. It is
      # healthy to consider this edge case because with mtimes in the future
      # reloading is not triggered.
      def max_mtime(paths)
        time_now = Time.now
        max_mtime = nil

        # Time comparisons are performed with #compare_without_coercion because
        # AS redefines these operators in a way that is much slower and does not
        # bring any benefit in this particular code.
        #
        # Read t1.compare_without_coercion(t2) < 0 as t1 < t2.
        paths.each do |path|
          mtime = File.mtime(path)

          next if time_now.compare_without_coercion(mtime) < 0

          if max_mtime.nil? || max_mtime.compare_without_coercion(mtime) < 0
            max_mtime = mtime
          end
        end

        max_mtime
      end

      # Turns {dir => exts} into a single Dir[] brace glob, or nil when empty.
      def compile_glob(hash)
        hash.freeze # Freeze so changes aren't accidentally pushed
        return if hash.empty?

        globs = hash.map do |key, value|
          "#{escape(key)}/**/*#{compile_ext(value)}"
        end
        "{#{globs.join(",")}}"
      end

      # Escapes commas so directory names survive the brace glob.
      def escape(key)
        key.gsub(",", '\,')
      end

      # ".{rb,yml}"-style suffix for the glob, or nil for "any file".
      def compile_ext(array)
        array = Array(array)
        return if array.empty?
        ".{#{array.join(",")}}"
      end
  end
end
|
@ -0,0 +1,17 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
  # Returns the version of the currently loaded Active Support as a <tt>Gem::Version</tt>.
  def self.gem_version
    Gem::Version.new(VERSION::STRING)
  end

  # Version components for this vendored Active Support release.
  module VERSION
    MAJOR = 5
    MINOR = 2
    TINY  = 2
    PRE   = nil

    # "MAJOR.MINOR.TINY" with the prerelease tag appended when present.
    segments = [MAJOR, MINOR, TINY]
    segments << PRE if PRE
    STRING = segments.join(".")
  end
end
|
@ -0,0 +1,38 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "zlib"
|
||||
require "stringio"
|
||||
|
||||
module ActiveSupport
  # A convenient wrapper for the zlib standard library that allows
  # compression/decompression of strings with gzip.
  #
  #   gzip = ActiveSupport::Gzip.compress('compress me!')
  #   # => "\x1F\x8B\b\x00o\x8D\xCDO\x00\x03K\xCE\xCF-(J-.V\xC8MU\x04\x00R>n\x83\f\x00\x00\x00"
  #
  #   ActiveSupport::Gzip.decompress(gzip)
  #   # => "compress me!"
  module Gzip
    # Binary-encoded StringIO whose #close merely rewinds, so the buffer
    # can still be read after GzipWriter closes it.
    class Stream < StringIO
      def initialize(*)
        super
        set_encoding "BINARY"
      end

      def close
        rewind
      end
    end

    # Decompresses a gzipped string.
    def self.decompress(source)
      Zlib::GzipReader.wrap(StringIO.new(source)) { |reader| reader.read }
    end

    # Compresses a string using gzip.
    def self.compress(source, level = Zlib::DEFAULT_COMPRESSION, strategy = Zlib::DEFAULT_STRATEGY)
      buffer = Stream.new
      Zlib::GzipWriter.wrap(buffer, level, strategy) { |writer| writer.write(source) }
      buffer.string
    end
  end
end
|
@ -0,0 +1,382 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/core_ext/hash/keys"
|
||||
require "active_support/core_ext/hash/reverse_merge"
|
||||
|
||||
module ActiveSupport
  # Implements a hash where keys <tt>:foo</tt> and <tt>"foo"</tt> are considered
  # to be the same.
  #
  #   rgb = ActiveSupport::HashWithIndifferentAccess.new
  #
  #   rgb[:black] = '#000000'
  #   rgb[:black]  # => '#000000'
  #   rgb['black'] # => '#000000'
  #
  #   rgb['white'] = '#FFFFFF'
  #   rgb[:white]  # => '#FFFFFF'
  #   rgb['white'] # => '#FFFFFF'
  #
  # Internally symbols are mapped to strings when used as keys in the entire
  # writing interface (calling <tt>[]=</tt>, <tt>merge</tt>, etc). This
  # mapping belongs to the public interface. For example, given:
  #
  #   hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1)
  #
  # You are guaranteed that the key is returned as a string:
  #
  #   hash.keys # => ["a"]
  #
  # Technically other types of keys are accepted:
  #
  #   hash = ActiveSupport::HashWithIndifferentAccess.new(a: 1)
  #   hash[0] = 0
  #   hash # => {"a"=>1, 0=>0}
  #
  # but this class is intended for use cases where strings or symbols are the
  # expected keys and it is convenient to understand both as the same. For
  # example the +params+ hash in Ruby on Rails.
  #
  # Note that core extensions define <tt>Hash#with_indifferent_access</tt>:
  #
  #   rgb = { black: '#000000', white: '#FFFFFF' }.with_indifferent_access
  #
  # which may be handy.
  #
  # To access this class outside of Rails, require the core extension with:
  #
  #   require "active_support/core_ext/hash/indifferent_access"
  #
  # which will, in turn, require this file.
  class HashWithIndifferentAccess < Hash
    # Returns +true+ so that <tt>Array#extract_options!</tt> finds members of
    # this class.
    def extractable_options?
      true
    end

    # Already indifferent; returns a copy of self.
    def with_indifferent_access
      dup
    end

    # Called by convert_value on nested hashes; already indifferent, so self.
    def nested_under_indifferent_access
      self
    end

    # Accepts a hash-like constructor (converted via update, preserving
    # default/default_proc) or a plain default value.
    def initialize(constructor = {})
      if constructor.respond_to?(:to_hash)
        super()
        update(constructor)

        hash = constructor.to_hash
        self.default = hash.default if hash.default
        self.default_proc = hash.default_proc if hash.default_proc
      else
        super(constructor)
      end
    end

    # Hash[]-style construction: HashWithIndifferentAccess[:a, 1] etc.
    def self.[](*args)
      new.merge!(Hash[*args])
    end

    # Preserve the raw Hash writer/updater for internal use.
    alias_method :regular_writer, :[]= unless method_defined?(:regular_writer)
    alias_method :regular_update, :update unless method_defined?(:regular_update)

    # Assigns a new value to the hash:
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new
    #   hash[:key] = 'value'
    #
    # This value can be later fetched using either +:key+ or <tt>'key'</tt>.
    def []=(key, value)
      regular_writer(convert_key(key), convert_value(value, for: :assignment))
    end

    alias_method :store, :[]=

    # Updates the receiver in-place, merging in the hash passed as argument:
    #
    #   hash_1 = ActiveSupport::HashWithIndifferentAccess.new
    #   hash_1[:key] = 'value'
    #
    #   hash_2 = ActiveSupport::HashWithIndifferentAccess.new
    #   hash_2[:key] = 'New Value!'
    #
    #   hash_1.update(hash_2) # => {"key"=>"New Value!"}
    #
    # The argument can be either an
    # <tt>ActiveSupport::HashWithIndifferentAccess</tt> or a regular +Hash+.
    # In either case the merge respects the semantics of indifferent access.
    #
    # If the argument is a regular hash with keys +:key+ and +"key"+ only one
    # of the values end up in the receiver, but which one is unspecified.
    #
    # When given a block, the value for duplicated keys will be determined
    # by the result of invoking the block with the duplicated key, the value
    # in the receiver, and the value in +other_hash+. The rules for duplicated
    # keys follow the semantics of indifferent access:
    #
    #   hash_1[:key] = 10
    #   hash_2['key'] = 12
    #   hash_1.update(hash_2) { |key, old, new| old + new } # => {"key"=>22}
    def update(other_hash)
      if other_hash.is_a? HashWithIndifferentAccess
        super(other_hash)
      else
        other_hash.to_hash.each_pair do |key, value|
          if block_given? && key?(key)
            value = yield(convert_key(key), self[key], value)
          end
          regular_writer(convert_key(key), convert_value(value))
        end
        self
      end
    end

    alias_method :merge!, :update

    # Checks the hash for a key matching the argument passed in:
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new
    #   hash['key'] = 'value'
    #   hash.key?(:key)  # => true
    #   hash.key?('key') # => true
    def key?(key)
      super(convert_key(key))
    end

    alias_method :include?, :key?
    alias_method :has_key?, :key?
    alias_method :member?, :key?

    # Same as <tt>Hash#[]</tt> where the key passed as argument can be
    # either a string or a symbol:
    #
    #   counters = ActiveSupport::HashWithIndifferentAccess.new
    #   counters[:foo] = 1
    #
    #   counters['foo'] # => 1
    #   counters[:foo]  # => 1
    #   counters[:zoo]  # => nil
    def [](key)
      super(convert_key(key))
    end

    # Same as <tt>Hash#fetch</tt> where the key passed as argument can be
    # either a string or a symbol:
    #
    #   counters = ActiveSupport::HashWithIndifferentAccess.new
    #   counters[:foo] = 1
    #
    #   counters.fetch('foo')          # => 1
    #   counters.fetch(:bar, 0)        # => 0
    #   counters.fetch(:bar) { |key| 0 } # => 0
    #   counters.fetch(:zoo)           # => KeyError: key not found: "zoo"
    def fetch(key, *extras)
      super(convert_key(key), *extras)
    end

    if Hash.new.respond_to?(:dig)
      # Same as <tt>Hash#dig</tt> where the key passed as argument can be
      # either a string or a symbol:
      #
      #   counters = ActiveSupport::HashWithIndifferentAccess.new
      #   counters[:foo] = { bar: 1 }
      #
      #   counters.dig('foo', 'bar') # => 1
      #   counters.dig(:foo, :bar)   # => 1
      #   counters.dig(:zoo)         # => nil
      def dig(*args)
        # Only the first key is converted; nested hashes convert their own.
        args[0] = convert_key(args[0]) if args.size > 0
        super(*args)
      end
    end

    # Same as <tt>Hash#default</tt> where the key passed as argument can be
    # either a string or a symbol:
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new(1)
    #   hash.default # => 1
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new { |hash, key| key }
    #   hash.default                   # => nil
    #   hash.default('foo')            # => 'foo'
    #   hash.default(:foo)             # => 'foo'
    def default(*args)
      super(*args.map { |arg| convert_key(arg) })
    end

    # Returns an array of the values at the specified indices:
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new
    #   hash[:a] = 'x'
    #   hash[:b] = 'y'
    #   hash.values_at('a', 'b') # => ["x", "y"]
    def values_at(*indices)
      indices.collect { |key| self[convert_key(key)] }
    end

    # Returns an array of the values at the specified indices, but also
    # raises an exception when one of the keys can't be found.
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new
    #   hash[:a] = 'x'
    #   hash[:b] = 'y'
    #   hash.fetch_values('a', 'b') # => ["x", "y"]
    #   hash.fetch_values('a', 'c') { |key| 'z' } # => ["x", "z"]
    #   hash.fetch_values('a', 'c') # => KeyError: key not found: "c"
    def fetch_values(*indices, &block)
      indices.collect { |key| fetch(key, &block) }
    end if Hash.method_defined?(:fetch_values)

    # Returns a shallow copy of the hash.
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new({ a: { b: 'b' } })
    #   dup  = hash.dup
    #   dup[:a][:c] = 'c'
    #
    #   hash[:a][:c] # => "c"
    #   dup[:a][:c]  # => "c"
    def dup
      self.class.new(self).tap do |new_hash|
        set_defaults(new_hash)
      end
    end

    # This method has the same semantics of +update+, except it does not
    # modify the receiver but rather returns a new hash with indifferent
    # access with the result of the merge.
    def merge(hash, &block)
      dup.update(hash, &block)
    end

    # Like +merge+ but the other way around: Merges the receiver into the
    # argument and returns a new hash with indifferent access as result:
    #
    #   hash = ActiveSupport::HashWithIndifferentAccess.new
    #   hash['a'] = nil
    #   hash.reverse_merge(a: 0, b: 1) # => {"a"=>nil, "b"=>1}
    def reverse_merge(other_hash)
      super(self.class.new(other_hash))
    end
    alias_method :with_defaults, :reverse_merge

    # Same semantics as +reverse_merge+ but modifies the receiver in-place.
    def reverse_merge!(other_hash)
      super(self.class.new(other_hash))
    end
    alias_method :with_defaults!, :reverse_merge!

    # Replaces the contents of this hash with other_hash.
    #
    #   h = { "a" => 100, "b" => 200 }
    #   h.replace({ "c" => 300, "d" => 400 }) # => {"c"=>300, "d"=>400}
    def replace(other_hash)
      super(self.class.new(other_hash))
    end

    # Removes the specified key from the hash.
    def delete(key)
      super(convert_key(key))
    end

    # Keys are already strings, so the stringify family is trivial.
    def stringify_keys!; self end
    def deep_stringify_keys!; self end
    def stringify_keys; dup end
    def deep_stringify_keys; dup end
    # In-place symbolization would break indifferent access; disallow it.
    undef :symbolize_keys!
    undef :deep_symbolize_keys!
    def symbolize_keys; to_hash.symbolize_keys! end
    alias_method :to_options, :symbolize_keys
    def deep_symbolize_keys; to_hash.deep_symbolize_keys! end
    def to_options!; self end

    # Enumerable-style filters below all return HashWithIndifferentAccess
    # (via dup) rather than plain Hash.
    def select(*args, &block)
      return to_enum(:select) unless block_given?
      dup.tap { |hash| hash.select!(*args, &block) }
    end

    def reject(*args, &block)
      return to_enum(:reject) unless block_given?
      dup.tap { |hash| hash.reject!(*args, &block) }
    end

    def transform_values(*args, &block)
      return to_enum(:transform_values) unless block_given?
      dup.tap { |hash| hash.transform_values!(*args, &block) }
    end

    def transform_keys(*args, &block)
      return to_enum(:transform_keys) unless block_given?
      dup.tap { |hash| hash.transform_keys!(*args, &block) }
    end

    # Note: yielded keys are the stored (string) keys; the block's result
    # is written back verbatim, without indifferent conversion.
    def transform_keys!
      return enum_for(:transform_keys!) { size } unless block_given?
      keys.each do |key|
        self[yield(key)] = delete(key)
      end
      self
    end

    # Indifferent Hash#slice: returns a new instance of this class.
    def slice(*keys)
      keys.map! { |key| convert_key(key) }
      self.class.new(super)
    end

    def slice!(*keys)
      keys.map! { |key| convert_key(key) }
      super
    end

    def compact
      dup.tap(&:compact!)
    end

    # Convert to a regular hash with string keys.
    def to_hash
      _new_hash = Hash.new
      set_defaults(_new_hash)

      each do |key, value|
        _new_hash[key] = convert_value(value, for: :to_hash)
      end
      _new_hash
    end

    private
      # Symbols become strings; every other key type passes through.
      def convert_key(key) # :doc:
        key.kind_of?(Symbol) ? key.to_s : key
      end

      # Recursively converts nested hashes (to indifferent access, or plain
      # Hash for :to_hash) and maps over arrays; scalars pass through.
      def convert_value(value, options = {}) # :doc:
        if value.is_a? Hash
          if options[:for] == :to_hash
            value.to_hash
          else
            value.nested_under_indifferent_access
          end
        elsif value.is_a?(Array)
          if options[:for] != :assignment || value.frozen?
            value = value.dup
          end
          value.map! { |e| convert_value(e, options) }
        else
          value
        end
      end

      # Copies this hash's default/default_proc onto +target+.
      def set_defaults(target) # :doc:
        if default_proc
          target.default_proc = default_proc.dup
        else
          target.default = default
        end
      end
  end
end
|
||||
|
||||
# :stopdoc:

# Top-level shorthand kept for backwards compatibility.
HashWithIndifferentAccess = ActiveSupport::HashWithIndifferentAccess
|
@ -0,0 +1,118 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support"
|
||||
require "active_support/file_update_checker"
|
||||
require "active_support/core_ext/array/wrap"
|
||||
|
||||
# :enddoc:
|
||||
|
||||
module I18n
  class Railtie < Rails::Railtie
    config.i18n = ActiveSupport::OrderedOptions.new
    config.i18n.railties_load_path = []
    config.i18n.load_path = []
    config.i18n.fallbacks = ActiveSupport::OrderedOptions.new

    # Set the i18n configuration after initialization since a lot of
    # configuration is still usually done in application initializers.
    config.after_initialize do |app|
      I18n::Railtie.initialize_i18n(app)
    end

    # Trigger i18n config before any eager loading has happened
    # so it's ready if any classes require it when eager loaded.
    config.before_eager_load do |app|
      I18n::Railtie.initialize_i18n(app)
    end

    # Guard: initialize_i18n can be reached from both hooks above, but must
    # only apply the configuration once.
    @i18n_inited = false

    # Setup i18n configuration.
    def self.initialize_i18n(app)
      return if @i18n_inited

      fallbacks = app.config.i18n.delete(:fallbacks)

      # Avoid issues with setting the default_locale by disabling available locales
      # check while configuring.
      enforce_available_locales = app.config.i18n.delete(:enforce_available_locales)
      enforce_available_locales = I18n.enforce_available_locales if enforce_available_locales.nil?
      I18n.enforce_available_locales = false

      reloadable_paths = []
      app.config.i18n.each do |setting, value|
        case setting
        when :railties_load_path
          reloadable_paths = value
          app.config.i18n.load_path.unshift(*value.flat_map(&:existent))
        when :load_path
          I18n.load_path += value
        else
          # Every other config key maps straight onto an I18n writer.
          I18n.send("#{setting}=", value)
        end
      end

      init_fallbacks(fallbacks) if fallbacks && validate_fallbacks(fallbacks)

      # Restore available locales check so it will take place from now on.
      I18n.enforce_available_locales = enforce_available_locales

      directories = watched_dirs_with_extensions(reloadable_paths)
      reloader = app.config.file_watcher.new(I18n.load_path.dup, directories) do
        # Drop deleted files and pick up newly added ones before reloading.
        I18n.load_path.keep_if { |p| File.exist?(p) }
        I18n.load_path |= reloadable_paths.flat_map(&:existent)

        I18n.reload!
      end

      app.reloaders << reloader
      app.reloader.to_run do
        reloader.execute_if_updated { require_unload_lock! }
      end
      reloader.execute

      @i18n_inited = true
    end

    # Mixes the Fallbacks backend into whatever backend is configured.
    def self.include_fallbacks_module
      I18n.backend.class.include(I18n::Backend::Fallbacks)
    end

    # Translates the config.i18n.fallbacks setting (OrderedOptions, Hash,
    # Array, or true) into I18n::Locale::Fallbacks constructor arguments.
    def self.init_fallbacks(fallbacks)
      include_fallbacks_module

      args = \
        case fallbacks
        when ActiveSupport::OrderedOptions
          [*(fallbacks[:defaults] || []) << fallbacks[:map]].compact
        when Hash, Array
          Array.wrap(fallbacks)
        else # TrueClass
          [I18n.default_locale]
        end

      if args.empty? || args.first.is_a?(Hash)
        args.unshift I18n.default_locale
      end

      I18n.fallbacks = I18n::Locale::Fallbacks.new(*args)
    end

    # Accepts OrderedOptions (if non-empty), true, Array, or Hash;
    # anything else is a configuration error.
    def self.validate_fallbacks(fallbacks)
      case fallbacks
      when ActiveSupport::OrderedOptions
        !fallbacks.empty?
      when TrueClass, Array, Hash
        true
      else
        raise "Unexpected fallback type #{fallbacks.inspect}"
      end
    end

    # Maps each reloadable path to { absolute_dir => [extensions] } for the
    # file watcher.
    def self.watched_dirs_with_extensions(paths)
      paths.each_with_object({}) do |path, result|
        result[path.absolute_current] = path.extensions
      end
    end
  end
end
|
@ -0,0 +1,9 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
# in case active_support/inflector is required without the rest of active_support
|
||||
require "active_support/inflector/inflections"
|
||||
require "active_support/inflector/transliterate"
|
||||
require "active_support/inflector/methods"
|
||||
|
||||
require "active_support/inflections"
|
||||
require "active_support/core_ext/string/inflections"
|
@ -0,0 +1,73 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "concurrent/map"
|
||||
require "openssl"
|
||||
|
||||
module ActiveSupport
|
||||
# KeyGenerator is a simple wrapper around OpenSSL's implementation of PBKDF2.
|
||||
# It can be used to derive a number of keys for various purposes from a given secret.
|
||||
# This lets Rails applications have a single secure secret, but avoid reusing that
|
||||
# key in multiple incompatible contexts.
|
||||
class KeyGenerator
|
||||
def initialize(secret, options = {})
|
||||
@secret = secret
|
||||
# The default iterations are higher than required for our key derivation uses
|
||||
# on the off chance someone uses this for password storage
|
||||
@iterations = options[:iterations] || 2**16
|
||||
end
|
||||
|
||||
# Returns a derived key suitable for use. The default key_size is chosen
|
||||
# to be compatible with the default settings of ActiveSupport::MessageVerifier.
|
||||
# i.e. OpenSSL::Digest::SHA1#block_length
|
||||
def generate_key(salt, key_size = 64)
|
||||
OpenSSL::PKCS5.pbkdf2_hmac_sha1(@secret, salt, @iterations, key_size)
|
||||
end
|
||||
end
|
||||
|
||||
# CachingKeyGenerator is a wrapper around KeyGenerator which allows users to avoid
|
||||
# re-executing the key generation process when it's called using the same salt and
|
||||
# key_size.
|
||||
class CachingKeyGenerator
|
||||
def initialize(key_generator)
|
||||
@key_generator = key_generator
|
||||
@cache_keys = Concurrent::Map.new
|
||||
end
|
||||
|
||||
# Returns a derived key suitable for use.
|
||||
def generate_key(*args)
|
||||
@cache_keys[args.join] ||= @key_generator.generate_key(*args)
|
||||
end
|
||||
end
|
||||
|
||||
class LegacyKeyGenerator # :nodoc:
|
||||
SECRET_MIN_LENGTH = 30 # Characters
|
||||
|
||||
def initialize(secret)
|
||||
ensure_secret_secure(secret)
|
||||
@secret = secret
|
||||
end
|
||||
|
||||
def generate_key(salt)
|
||||
@secret
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# To prevent users from using something insecure like "Password" we make sure that the
|
||||
# secret they've provided is at least 30 characters in length.
|
||||
def ensure_secret_secure(secret)
|
||||
if secret.blank?
|
||||
raise ArgumentError, "A secret is required to generate an integrity hash " \
|
||||
"for cookie session data. Set a secret_key_base of at least " \
|
||||
"#{SECRET_MIN_LENGTH} characters in via `bin/rails credentials:edit`."
|
||||
end
|
||||
|
||||
if secret.length < SECRET_MIN_LENGTH
|
||||
raise ArgumentError, "Secret should be something secure, " \
|
||||
"like \"#{SecureRandom.hex(16)}\". The value you " \
|
||||
"provided, \"#{secret}\", is shorter than the minimum length " \
|
||||
"of #{SECRET_MIN_LENGTH} characters."
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,108 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/logger_silence"
|
||||
require "active_support/logger_thread_safe_level"
|
||||
require "logger"
|
||||
|
||||
module ActiveSupport
|
||||
class Logger < ::Logger
|
||||
include ActiveSupport::LoggerThreadSafeLevel
|
||||
include LoggerSilence
|
||||
|
||||
# Returns true if the logger destination matches one of the sources
|
||||
#
|
||||
# logger = Logger.new(STDOUT)
|
||||
# ActiveSupport::Logger.logger_outputs_to?(logger, STDOUT)
|
||||
# # => true
|
||||
def self.logger_outputs_to?(logger, *sources)
|
||||
logdev = logger.instance_variable_get("@logdev")
|
||||
logger_source = logdev.dev if logdev.respond_to?(:dev)
|
||||
sources.any? { |source| source == logger_source }
|
||||
end
|
||||
|
||||
# Broadcasts logs to multiple loggers.
|
||||
def self.broadcast(logger) # :nodoc:
|
||||
Module.new do
|
||||
define_method(:add) do |*args, &block|
|
||||
logger.add(*args, &block)
|
||||
super(*args, &block)
|
||||
end
|
||||
|
||||
define_method(:<<) do |x|
|
||||
logger << x
|
||||
super(x)
|
||||
end
|
||||
|
||||
define_method(:close) do
|
||||
logger.close
|
||||
super()
|
||||
end
|
||||
|
||||
define_method(:progname=) do |name|
|
||||
logger.progname = name
|
||||
super(name)
|
||||
end
|
||||
|
||||
define_method(:formatter=) do |formatter|
|
||||
logger.formatter = formatter
|
||||
super(formatter)
|
||||
end
|
||||
|
||||
define_method(:level=) do |level|
|
||||
logger.level = level
|
||||
super(level)
|
||||
end
|
||||
|
||||
define_method(:local_level=) do |level|
|
||||
logger.local_level = level if logger.respond_to?(:local_level=)
|
||||
super(level) if respond_to?(:local_level=)
|
||||
end
|
||||
|
||||
define_method(:silence) do |level = Logger::ERROR, &block|
|
||||
if logger.respond_to?(:silence)
|
||||
logger.silence(level) do
|
||||
if defined?(super)
|
||||
super(level, &block)
|
||||
else
|
||||
block.call(self)
|
||||
end
|
||||
end
|
||||
else
|
||||
if defined?(super)
|
||||
super(level, &block)
|
||||
else
|
||||
block.call(self)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def initialize(*args)
|
||||
super
|
||||
@formatter = SimpleFormatter.new
|
||||
after_initialize if respond_to? :after_initialize
|
||||
end
|
||||
|
||||
def add(severity, message = nil, progname = nil, &block)
|
||||
return true if @logdev.nil? || (severity || UNKNOWN) < level
|
||||
super
|
||||
end
|
||||
|
||||
Logger::Severity.constants.each do |severity|
|
||||
class_eval(<<-EOT, __FILE__, __LINE__ + 1)
|
||||
def #{severity.downcase}? # def debug?
|
||||
Logger::#{severity} >= level # DEBUG >= level
|
||||
end # end
|
||||
EOT
|
||||
end
|
||||
|
||||
# Simple formatter which only displays the message.
|
||||
class SimpleFormatter < ::Logger::Formatter
|
||||
# This method is invoked when a log event occurs
|
||||
def call(severity, timestamp, progname, msg)
|
||||
"#{String === msg ? msg : msg.inspect}\n"
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,29 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/concern"
|
||||
require "active_support/core_ext/module/attribute_accessors"
|
||||
require "concurrent"
|
||||
|
||||
module LoggerSilence
|
||||
extend ActiveSupport::Concern
|
||||
|
||||
included do
|
||||
cattr_accessor :silencer, default: true
|
||||
end
|
||||
|
||||
# Silences the logger for the duration of the block.
|
||||
def silence(temporary_level = Logger::ERROR)
|
||||
if silencer
|
||||
begin
|
||||
old_local_level = local_level
|
||||
self.local_level = temporary_level
|
||||
|
||||
yield self
|
||||
ensure
|
||||
self.local_level = old_local_level
|
||||
end
|
||||
else
|
||||
yield self
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,33 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/concern"
|
||||
|
||||
module ActiveSupport
|
||||
module LoggerThreadSafeLevel # :nodoc:
|
||||
extend ActiveSupport::Concern
|
||||
|
||||
def after_initialize
|
||||
@local_levels = Concurrent::Map.new(initial_capacity: 2)
|
||||
end
|
||||
|
||||
def local_log_id
|
||||
Thread.current.__id__
|
||||
end
|
||||
|
||||
def local_level
|
||||
@local_levels[local_log_id]
|
||||
end
|
||||
|
||||
def local_level=(level)
|
||||
if level
|
||||
@local_levels[local_log_id] = level
|
||||
else
|
||||
@local_levels.delete(local_log_id)
|
||||
end
|
||||
end
|
||||
|
||||
def level
|
||||
local_level || super
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,229 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "openssl"
|
||||
require "base64"
|
||||
require "active_support/core_ext/array/extract_options"
|
||||
require "active_support/core_ext/module/attribute_accessors"
|
||||
require "active_support/message_verifier"
|
||||
require "active_support/messages/metadata"
|
||||
|
||||
module ActiveSupport
|
||||
# MessageEncryptor is a simple way to encrypt values which get stored
|
||||
# somewhere you don't trust.
|
||||
#
|
||||
# The cipher text and initialization vector are base64 encoded and returned
|
||||
# to you.
|
||||
#
|
||||
# This can be used in situations similar to the <tt>MessageVerifier</tt>, but
|
||||
# where you don't want users to be able to determine the value of the payload.
|
||||
#
|
||||
# len = ActiveSupport::MessageEncryptor.key_len
|
||||
# salt = SecureRandom.random_bytes(len)
|
||||
# key = ActiveSupport::KeyGenerator.new('password').generate_key(salt, len) # => "\x89\xE0\x156\xAC..."
|
||||
# crypt = ActiveSupport::MessageEncryptor.new(key) # => #<ActiveSupport::MessageEncryptor ...>
|
||||
# encrypted_data = crypt.encrypt_and_sign('my secret data') # => "NlFBTTMwOUV5UlA1QlNEN2xkY2d6eThYWWh..."
|
||||
# crypt.decrypt_and_verify(encrypted_data) # => "my secret data"
|
||||
#
|
||||
# === Confining messages to a specific purpose
|
||||
#
|
||||
# By default any message can be used throughout your app. But they can also be
|
||||
# confined to a specific +:purpose+.
|
||||
#
|
||||
# token = crypt.encrypt_and_sign("this is the chair", purpose: :login)
|
||||
#
|
||||
# Then that same purpose must be passed when verifying to get the data back out:
|
||||
#
|
||||
# crypt.decrypt_and_verify(token, purpose: :login) # => "this is the chair"
|
||||
# crypt.decrypt_and_verify(token, purpose: :shipping) # => nil
|
||||
# crypt.decrypt_and_verify(token) # => nil
|
||||
#
|
||||
# Likewise, if a message has no purpose it won't be returned when verifying with
|
||||
# a specific purpose.
|
||||
#
|
||||
# token = crypt.encrypt_and_sign("the conversation is lively")
|
||||
# crypt.decrypt_and_verify(token, purpose: :scare_tactics) # => nil
|
||||
# crypt.decrypt_and_verify(token) # => "the conversation is lively"
|
||||
#
|
||||
# === Making messages expire
|
||||
#
|
||||
# By default messages last forever and verifying one year from now will still
|
||||
# return the original value. But messages can be set to expire at a given
|
||||
# time with +:expires_in+ or +:expires_at+.
|
||||
#
|
||||
# crypt.encrypt_and_sign(parcel, expires_in: 1.month)
|
||||
# crypt.encrypt_and_sign(doowad, expires_at: Time.now.end_of_year)
|
||||
#
|
||||
# Then the messages can be verified and returned upto the expire time.
|
||||
# Thereafter, verifying returns +nil+.
|
||||
#
|
||||
# === Rotating keys
|
||||
#
|
||||
# MessageEncryptor also supports rotating out old configurations by falling
|
||||
# back to a stack of encryptors. Call +rotate+ to build and add an encryptor
|
||||
# so +decrypt_and_verify+ will also try the fallback.
|
||||
#
|
||||
# By default any rotated encryptors use the values of the primary
|
||||
# encryptor unless specified otherwise.
|
||||
#
|
||||
# You'd give your encryptor the new defaults:
|
||||
#
|
||||
# crypt = ActiveSupport::MessageEncryptor.new(@secret, cipher: "aes-256-gcm")
|
||||
#
|
||||
# Then gradually rotate the old values out by adding them as fallbacks. Any message
|
||||
# generated with the old values will then work until the rotation is removed.
|
||||
#
|
||||
# crypt.rotate old_secret # Fallback to an old secret instead of @secret.
|
||||
# crypt.rotate cipher: "aes-256-cbc" # Fallback to an old cipher instead of aes-256-gcm.
|
||||
#
|
||||
# Though if both the secret and the cipher was changed at the same time,
|
||||
# the above should be combined into:
|
||||
#
|
||||
# crypt.rotate old_secret, cipher: "aes-256-cbc"
|
||||
class MessageEncryptor
|
||||
prepend Messages::Rotator::Encryptor
|
||||
|
||||
cattr_accessor :use_authenticated_message_encryption, instance_accessor: false, default: false
|
||||
|
||||
class << self
|
||||
def default_cipher #:nodoc:
|
||||
if use_authenticated_message_encryption
|
||||
"aes-256-gcm"
|
||||
else
|
||||
"aes-256-cbc"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
module NullSerializer #:nodoc:
|
||||
def self.load(value)
|
||||
value
|
||||
end
|
||||
|
||||
def self.dump(value)
|
||||
value
|
||||
end
|
||||
end
|
||||
|
||||
module NullVerifier #:nodoc:
|
||||
def self.verify(value)
|
||||
value
|
||||
end
|
||||
|
||||
def self.generate(value)
|
||||
value
|
||||
end
|
||||
end
|
||||
|
||||
class InvalidMessage < StandardError; end
|
||||
OpenSSLCipherError = OpenSSL::Cipher::CipherError
|
||||
|
||||
# Initialize a new MessageEncryptor. +secret+ must be at least as long as
|
||||
# the cipher key size. For the default 'aes-256-gcm' cipher, this is 256
|
||||
# bits. If you are using a user-entered secret, you can generate a suitable
|
||||
# key by using <tt>ActiveSupport::KeyGenerator</tt> or a similar key
|
||||
# derivation function.
|
||||
#
|
||||
# First additional parameter is used as the signature key for +MessageVerifier+.
|
||||
# This allows you to specify keys to encrypt and sign data.
|
||||
#
|
||||
# ActiveSupport::MessageEncryptor.new('secret', 'signature_secret')
|
||||
#
|
||||
# Options:
|
||||
# * <tt>:cipher</tt> - Cipher to use. Can be any cipher returned by
|
||||
# <tt>OpenSSL::Cipher.ciphers</tt>. Default is 'aes-256-gcm'.
|
||||
# * <tt>:digest</tt> - String of digest to use for signing. Default is
|
||||
# +SHA1+. Ignored when using an AEAD cipher like 'aes-256-gcm'.
|
||||
# * <tt>:serializer</tt> - Object serializer to use. Default is +Marshal+.
|
||||
def initialize(secret, *signature_key_or_options)
|
||||
options = signature_key_or_options.extract_options!
|
||||
sign_secret = signature_key_or_options.first
|
||||
@secret = secret
|
||||
@sign_secret = sign_secret
|
||||
@cipher = options[:cipher] || self.class.default_cipher
|
||||
@digest = options[:digest] || "SHA1" unless aead_mode?
|
||||
@verifier = resolve_verifier
|
||||
@serializer = options[:serializer] || Marshal
|
||||
end
|
||||
|
||||
# Encrypt and sign a message. We need to sign the message in order to avoid
|
||||
# padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/.
|
||||
def encrypt_and_sign(value, expires_at: nil, expires_in: nil, purpose: nil)
|
||||
verifier.generate(_encrypt(value, expires_at: expires_at, expires_in: expires_in, purpose: purpose))
|
||||
end
|
||||
|
||||
# Decrypt and verify a message. We need to verify the message in order to
|
||||
# avoid padding attacks. Reference: https://www.limited-entropy.com/padding-oracle-attacks/.
|
||||
def decrypt_and_verify(data, purpose: nil, **)
|
||||
_decrypt(verifier.verify(data), purpose)
|
||||
end
|
||||
|
||||
# Given a cipher, returns the key length of the cipher to help generate the key of desired size
|
||||
def self.key_len(cipher = default_cipher)
|
||||
OpenSSL::Cipher.new(cipher).key_len
|
||||
end
|
||||
|
||||
private
|
||||
def _encrypt(value, **metadata_options)
|
||||
cipher = new_cipher
|
||||
cipher.encrypt
|
||||
cipher.key = @secret
|
||||
|
||||
# Rely on OpenSSL for the initialization vector
|
||||
iv = cipher.random_iv
|
||||
cipher.auth_data = "" if aead_mode?
|
||||
|
||||
encrypted_data = cipher.update(Messages::Metadata.wrap(@serializer.dump(value), metadata_options))
|
||||
encrypted_data << cipher.final
|
||||
|
||||
blob = "#{::Base64.strict_encode64 encrypted_data}--#{::Base64.strict_encode64 iv}"
|
||||
blob = "#{blob}--#{::Base64.strict_encode64 cipher.auth_tag}" if aead_mode?
|
||||
blob
|
||||
end
|
||||
|
||||
def _decrypt(encrypted_message, purpose)
|
||||
cipher = new_cipher
|
||||
encrypted_data, iv, auth_tag = encrypted_message.split("--".freeze).map { |v| ::Base64.strict_decode64(v) }
|
||||
|
||||
# Currently the OpenSSL bindings do not raise an error if auth_tag is
|
||||
# truncated, which would allow an attacker to easily forge it. See
|
||||
# https://github.com/ruby/openssl/issues/63
|
||||
raise InvalidMessage if aead_mode? && (auth_tag.nil? || auth_tag.bytes.length != 16)
|
||||
|
||||
cipher.decrypt
|
||||
cipher.key = @secret
|
||||
cipher.iv = iv
|
||||
if aead_mode?
|
||||
cipher.auth_tag = auth_tag
|
||||
cipher.auth_data = ""
|
||||
end
|
||||
|
||||
decrypted_data = cipher.update(encrypted_data)
|
||||
decrypted_data << cipher.final
|
||||
|
||||
message = Messages::Metadata.verify(decrypted_data, purpose)
|
||||
@serializer.load(message) if message
|
||||
rescue OpenSSLCipherError, TypeError, ArgumentError
|
||||
raise InvalidMessage
|
||||
end
|
||||
|
||||
def new_cipher
|
||||
OpenSSL::Cipher.new(@cipher)
|
||||
end
|
||||
|
||||
def verifier
|
||||
@verifier
|
||||
end
|
||||
|
||||
def aead_mode?
|
||||
@aead_mode ||= new_cipher.authenticated?
|
||||
end
|
||||
|
||||
def resolve_verifier
|
||||
if aead_mode?
|
||||
NullVerifier
|
||||
else
|
||||
MessageVerifier.new(@sign_secret || @secret, digest: @digest, serializer: NullSerializer)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,205 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "base64"
|
||||
require "active_support/core_ext/object/blank"
|
||||
require "active_support/security_utils"
|
||||
require "active_support/messages/metadata"
|
||||
require "active_support/messages/rotator"
|
||||
|
||||
module ActiveSupport
|
||||
# +MessageVerifier+ makes it easy to generate and verify messages which are
|
||||
# signed to prevent tampering.
|
||||
#
|
||||
# This is useful for cases like remember-me tokens and auto-unsubscribe links
|
||||
# where the session store isn't suitable or available.
|
||||
#
|
||||
# Remember Me:
|
||||
# cookies[:remember_me] = @verifier.generate([@user.id, 2.weeks.from_now])
|
||||
#
|
||||
# In the authentication filter:
|
||||
#
|
||||
# id, time = @verifier.verify(cookies[:remember_me])
|
||||
# if Time.now < time
|
||||
# self.current_user = User.find(id)
|
||||
# end
|
||||
#
|
||||
# By default it uses Marshal to serialize the message. If you want to use
|
||||
# another serialization method, you can set the serializer in the options
|
||||
# hash upon initialization:
|
||||
#
|
||||
# @verifier = ActiveSupport::MessageVerifier.new('s3Krit', serializer: YAML)
|
||||
#
|
||||
# +MessageVerifier+ creates HMAC signatures using SHA1 hash algorithm by default.
|
||||
# If you want to use a different hash algorithm, you can change it by providing
|
||||
# +:digest+ key as an option while initializing the verifier:
|
||||
#
|
||||
# @verifier = ActiveSupport::MessageVerifier.new('s3Krit', digest: 'SHA256')
|
||||
#
|
||||
# === Confining messages to a specific purpose
|
||||
#
|
||||
# By default any message can be used throughout your app. But they can also be
|
||||
# confined to a specific +:purpose+.
|
||||
#
|
||||
# token = @verifier.generate("this is the chair", purpose: :login)
|
||||
#
|
||||
# Then that same purpose must be passed when verifying to get the data back out:
|
||||
#
|
||||
# @verifier.verified(token, purpose: :login) # => "this is the chair"
|
||||
# @verifier.verified(token, purpose: :shipping) # => nil
|
||||
# @verifier.verified(token) # => nil
|
||||
#
|
||||
# @verifier.verify(token, purpose: :login) # => "this is the chair"
|
||||
# @verifier.verify(token, purpose: :shipping) # => ActiveSupport::MessageVerifier::InvalidSignature
|
||||
# @verifier.verify(token) # => ActiveSupport::MessageVerifier::InvalidSignature
|
||||
#
|
||||
# Likewise, if a message has no purpose it won't be returned when verifying with
|
||||
# a specific purpose.
|
||||
#
|
||||
# token = @verifier.generate("the conversation is lively")
|
||||
# @verifier.verified(token, purpose: :scare_tactics) # => nil
|
||||
# @verifier.verified(token) # => "the conversation is lively"
|
||||
#
|
||||
# @verifier.verify(token, purpose: :scare_tactics) # => ActiveSupport::MessageVerifier::InvalidSignature
|
||||
# @verifier.verify(token) # => "the conversation is lively"
|
||||
#
|
||||
# === Making messages expire
|
||||
#
|
||||
# By default messages last forever and verifying one year from now will still
|
||||
# return the original value. But messages can be set to expire at a given
|
||||
# time with +:expires_in+ or +:expires_at+.
|
||||
#
|
||||
# @verifier.generate(parcel, expires_in: 1.month)
|
||||
# @verifier.generate(doowad, expires_at: Time.now.end_of_year)
|
||||
#
|
||||
# Then the messages can be verified and returned upto the expire time.
|
||||
# Thereafter, the +verified+ method returns +nil+ while +verify+ raises
|
||||
# <tt>ActiveSupport::MessageVerifier::InvalidSignature</tt>.
|
||||
#
|
||||
# === Rotating keys
|
||||
#
|
||||
# MessageVerifier also supports rotating out old configurations by falling
|
||||
# back to a stack of verifiers. Call +rotate+ to build and add a verifier to
|
||||
# so either +verified+ or +verify+ will also try verifying with the fallback.
|
||||
#
|
||||
# By default any rotated verifiers use the values of the primary
|
||||
# verifier unless specified otherwise.
|
||||
#
|
||||
# You'd give your verifier the new defaults:
|
||||
#
|
||||
# verifier = ActiveSupport::MessageVerifier.new(@secret, digest: "SHA512", serializer: JSON)
|
||||
#
|
||||
# Then gradually rotate the old values out by adding them as fallbacks. Any message
|
||||
# generated with the old values will then work until the rotation is removed.
|
||||
#
|
||||
# verifier.rotate old_secret # Fallback to an old secret instead of @secret.
|
||||
# verifier.rotate digest: "SHA256" # Fallback to an old digest instead of SHA512.
|
||||
# verifier.rotate serializer: Marshal # Fallback to an old serializer instead of JSON.
|
||||
#
|
||||
# Though the above would most likely be combined into one rotation:
|
||||
#
|
||||
# verifier.rotate old_secret, digest: "SHA256", serializer: Marshal
|
||||
class MessageVerifier
|
||||
prepend Messages::Rotator::Verifier
|
||||
|
||||
class InvalidSignature < StandardError; end
|
||||
|
||||
def initialize(secret, options = {})
|
||||
raise ArgumentError, "Secret should not be nil." unless secret
|
||||
@secret = secret
|
||||
@digest = options[:digest] || "SHA1"
|
||||
@serializer = options[:serializer] || Marshal
|
||||
end
|
||||
|
||||
# Checks if a signed message could have been generated by signing an object
|
||||
# with the +MessageVerifier+'s secret.
|
||||
#
|
||||
# verifier = ActiveSupport::MessageVerifier.new 's3Krit'
|
||||
# signed_message = verifier.generate 'a private message'
|
||||
# verifier.valid_message?(signed_message) # => true
|
||||
#
|
||||
# tampered_message = signed_message.chop # editing the message invalidates the signature
|
||||
# verifier.valid_message?(tampered_message) # => false
|
||||
def valid_message?(signed_message)
|
||||
return if signed_message.nil? || !signed_message.valid_encoding? || signed_message.blank?
|
||||
|
||||
data, digest = signed_message.split("--".freeze)
|
||||
data.present? && digest.present? && ActiveSupport::SecurityUtils.secure_compare(digest, generate_digest(data))
|
||||
end
|
||||
|
||||
# Decodes the signed message using the +MessageVerifier+'s secret.
|
||||
#
|
||||
# verifier = ActiveSupport::MessageVerifier.new 's3Krit'
|
||||
#
|
||||
# signed_message = verifier.generate 'a private message'
|
||||
# verifier.verified(signed_message) # => 'a private message'
|
||||
#
|
||||
# Returns +nil+ if the message was not signed with the same secret.
|
||||
#
|
||||
# other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit'
|
||||
# other_verifier.verified(signed_message) # => nil
|
||||
#
|
||||
# Returns +nil+ if the message is not Base64-encoded.
|
||||
#
|
||||
# invalid_message = "f--46a0120593880c733a53b6dad75b42ddc1c8996d"
|
||||
# verifier.verified(invalid_message) # => nil
|
||||
#
|
||||
# Raises any error raised while decoding the signed message.
|
||||
#
|
||||
# incompatible_message = "test--dad7b06c94abba8d46a15fafaef56c327665d5ff"
|
||||
# verifier.verified(incompatible_message) # => TypeError: incompatible marshal file format
|
||||
def verified(signed_message, purpose: nil, **)
|
||||
if valid_message?(signed_message)
|
||||
begin
|
||||
data = signed_message.split("--".freeze)[0]
|
||||
message = Messages::Metadata.verify(decode(data), purpose)
|
||||
@serializer.load(message) if message
|
||||
rescue ArgumentError => argument_error
|
||||
return if argument_error.message.include?("invalid base64")
|
||||
raise
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Decodes the signed message using the +MessageVerifier+'s secret.
|
||||
#
|
||||
# verifier = ActiveSupport::MessageVerifier.new 's3Krit'
|
||||
# signed_message = verifier.generate 'a private message'
|
||||
#
|
||||
# verifier.verify(signed_message) # => 'a private message'
|
||||
#
|
||||
# Raises +InvalidSignature+ if the message was not signed with the same
|
||||
# secret or was not Base64-encoded.
|
||||
#
|
||||
# other_verifier = ActiveSupport::MessageVerifier.new 'd1ff3r3nt-s3Krit'
|
||||
# other_verifier.verify(signed_message) # => ActiveSupport::MessageVerifier::InvalidSignature
|
||||
def verify(*args)
|
||||
verified(*args) || raise(InvalidSignature)
|
||||
end
|
||||
|
||||
# Generates a signed message for the provided value.
|
||||
#
|
||||
# The message is signed with the +MessageVerifier+'s secret. Without knowing
|
||||
# the secret, the original value cannot be extracted from the message.
|
||||
#
|
||||
# verifier = ActiveSupport::MessageVerifier.new 's3Krit'
|
||||
# verifier.generate 'a private message' # => "BAhJIhRwcml2YXRlLW1lc3NhZ2UGOgZFVA==--e2d724331ebdee96a10fb99b089508d1c72bd772"
|
||||
def generate(value, expires_at: nil, expires_in: nil, purpose: nil)
|
||||
data = encode(Messages::Metadata.wrap(@serializer.dump(value), expires_at: expires_at, expires_in: expires_in, purpose: purpose))
|
||||
"#{data}--#{generate_digest(data)}"
|
||||
end
|
||||
|
||||
private
|
||||
def encode(data)
|
||||
::Base64.strict_encode64(data)
|
||||
end
|
||||
|
||||
def decode(data)
|
||||
::Base64.strict_decode64(data)
|
||||
end
|
||||
|
||||
def generate_digest(data)
|
||||
require "openssl" unless defined?(OpenSSL)
|
||||
OpenSSL::HMAC.hexdigest(OpenSSL::Digest.const_get(@digest).new, @secret, data)
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,27 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/core_ext/hash/deep_merge"
|
||||
|
||||
module ActiveSupport
|
||||
class OptionMerger #:nodoc:
|
||||
instance_methods.each do |method|
|
||||
undef_method(method) if method !~ /^(__|instance_eval|class|object_id)/
|
||||
end
|
||||
|
||||
def initialize(context, options)
|
||||
@context, @options = context, options
|
||||
end
|
||||
|
||||
private
|
||||
def method_missing(method, *arguments, &block)
|
||||
if arguments.first.is_a?(Proc)
|
||||
proc = arguments.pop
|
||||
arguments << lambda { |*args| @options.deep_merge(proc.call(*args)) }
|
||||
else
|
||||
arguments << (arguments.last.respond_to?(:to_hash) ? @options.deep_merge(arguments.pop) : @options.dup)
|
||||
end
|
||||
|
||||
@context.__send__(method, *arguments, &block)
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,50 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "yaml"
|
||||
|
||||
YAML.add_builtin_type("omap") do |type, val|
|
||||
ActiveSupport::OrderedHash[val.map { |v| v.to_a.first }]
|
||||
end
|
||||
|
||||
module ActiveSupport
|
||||
# DEPRECATED: <tt>ActiveSupport::OrderedHash</tt> implements a hash that preserves
|
||||
# insertion order.
|
||||
#
|
||||
# oh = ActiveSupport::OrderedHash.new
|
||||
# oh[:a] = 1
|
||||
# oh[:b] = 2
|
||||
# oh.keys # => [:a, :b], this order is guaranteed
|
||||
#
|
||||
# Also, maps the +omap+ feature for YAML files
|
||||
# (See http://yaml.org/type/omap.html) to support ordered items
|
||||
# when loading from yaml.
|
||||
#
|
||||
# <tt>ActiveSupport::OrderedHash</tt> is namespaced to prevent conflicts
|
||||
# with other implementations.
|
||||
class OrderedHash < ::Hash
|
||||
def to_yaml_type
|
||||
"!tag:yaml.org,2002:omap"
|
||||
end
|
||||
|
||||
def encode_with(coder)
|
||||
coder.represent_seq "!omap", map { |k, v| { k => v } }
|
||||
end
|
||||
|
||||
def select(*args, &block)
|
||||
dup.tap { |hash| hash.select!(*args, &block) }
|
||||
end
|
||||
|
||||
def reject(*args, &block)
|
||||
dup.tap { |hash| hash.reject!(*args, &block) }
|
||||
end
|
||||
|
||||
def nested_under_indifferent_access
|
||||
self
|
||||
end
|
||||
|
||||
# Returns true to make sure that this hash is extractable via <tt>Array#extract_options!</tt>
|
||||
def extractable_options?
|
||||
true
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,85 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/core_ext/object/blank"
|
||||
|
||||
module ActiveSupport
|
||||
# Usually key value pairs are handled something like this:
|
||||
#
|
||||
# h = {}
|
||||
# h[:boy] = 'John'
|
||||
# h[:girl] = 'Mary'
|
||||
# h[:boy] # => 'John'
|
||||
# h[:girl] # => 'Mary'
|
||||
# h[:dog] # => nil
|
||||
#
|
||||
# Using +OrderedOptions+, the above code could be reduced to:
|
||||
#
|
||||
# h = ActiveSupport::OrderedOptions.new
|
||||
# h.boy = 'John'
|
||||
# h.girl = 'Mary'
|
||||
# h.boy # => 'John'
|
||||
# h.girl # => 'Mary'
|
||||
# h.dog # => nil
|
||||
#
|
||||
# To raise an exception when the value is blank, append a
|
||||
# bang to the key name, like:
|
||||
#
|
||||
# h.dog! # => raises KeyError: :dog is blank
|
||||
#
|
||||
class OrderedOptions < Hash
|
||||
alias_method :_get, :[] # preserve the original #[] method
|
||||
protected :_get # make it protected
|
||||
|
||||
def []=(key, value)
|
||||
super(key.to_sym, value)
|
||||
end
|
||||
|
||||
def [](key)
|
||||
super(key.to_sym)
|
||||
end
|
||||
|
||||
def method_missing(name, *args)
|
||||
name_string = name.to_s
|
||||
if name_string.chomp!("=")
|
||||
self[name_string] = args.first
|
||||
else
|
||||
bangs = name_string.chomp!("!")
|
||||
|
||||
if bangs
|
||||
self[name_string].presence || raise(KeyError.new(":#{name_string} is blank"))
|
||||
else
|
||||
self[name_string]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def respond_to_missing?(name, include_private)
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
# +InheritableOptions+ provides a constructor to build an +OrderedOptions+
|
||||
# hash inherited from another hash.
|
||||
#
|
||||
# Use this if you already have some hash and you want to create a new one based on it.
|
||||
#
|
||||
# h = ActiveSupport::InheritableOptions.new({ girl: 'Mary', boy: 'John' })
|
||||
# h.girl # => 'Mary'
|
||||
# h.boy # => 'John'
|
||||
class InheritableOptions < OrderedOptions
|
||||
def initialize(parent = nil)
|
||||
if parent.kind_of?(OrderedOptions)
|
||||
# use the faster _get when dealing with OrderedOptions
|
||||
super() { |h, k| parent._get(k) }
|
||||
elsif parent
|
||||
super() { |h, k| parent[k] }
|
||||
else
|
||||
super()
|
||||
end
|
||||
end
|
||||
|
||||
def inheritable_copy
|
||||
self.class.new(self)
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,15 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
|
||||
# A class with no predefined methods that behaves similarly to Builder's
|
||||
# BlankSlate. Used for proxy classes.
|
||||
class ProxyObject < ::BasicObject
|
||||
undef_method :==
|
||||
undef_method :equal?
|
||||
|
||||
# Let ActiveSupport::ProxyObject at least raise exceptions.
|
||||
def raise(*args)
|
||||
::Object.send(:raise, *args)
|
||||
end
|
||||
end
|
||||
end
|
@ -0,0 +1,35 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
# This is private interface.
|
||||
#
|
||||
# Rails components cherry pick from Active Support as needed, but there are a
|
||||
# few features that are used for sure in some way or another and it is not worth
|
||||
# putting individual requires absolutely everywhere. Think blank? for example.
|
||||
#
|
||||
# This file is loaded by every Rails component except Active Support itself,
|
||||
# but it does not belong to the Rails public interface. It is internal to
|
||||
# Rails and can change anytime.
|
||||
|
||||
# Defines Object#blank? and Object#present?.
|
||||
require "active_support/core_ext/object/blank"
|
||||
|
||||
# Rails own autoload, eager_load, etc.
|
||||
require "active_support/dependencies/autoload"
|
||||
|
||||
# Support for ClassMethods and the included macro.
|
||||
require "active_support/concern"
|
||||
|
||||
# Defines Class#class_attribute.
|
||||
require "active_support/core_ext/class/attribute"
|
||||
|
||||
# Defines Module#delegate.
|
||||
require "active_support/core_ext/module/delegation"
|
||||
|
||||
# Defines ActiveSupport::Deprecation.
|
||||
require "active_support/deprecation"
|
||||
|
||||
# Defines Regexp#match?.
|
||||
#
|
||||
# This should be removed when Rails needs Ruby 2.4 or later, and the require
|
||||
# added where other Regexp extensions are being used (easy to grep).
|
||||
require "active_support/core_ext/regexp"
|
@ -0,0 +1,80 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support"
|
||||
require "active_support/i18n_railtie"
|
||||
|
||||
module ActiveSupport
  # Wires Active Support into a Rails application: exposes
  # config.active_support and installs boot-time initializers that copy
  # those settings onto Active Support.
  class Railtie < Rails::Railtie # :nodoc:
    config.active_support = ActiveSupport::OrderedOptions.new

    config.eager_load_namespaces << ActiveSupport

    initializer "active_support.set_authenticated_message_encryption" do |app|
      config.after_initialize do
        unless app.config.active_support.use_authenticated_message_encryption.nil?
          ActiveSupport::MessageEncryptor.use_authenticated_message_encryption =
            app.config.active_support.use_authenticated_message_encryption
        end
      end
    end

    # CurrentAttributes state must not leak across requests or reloads.
    initializer "active_support.reset_all_current_attributes_instances" do |app|
      app.reloader.before_class_unload { ActiveSupport::CurrentAttributes.clear_all }
      app.executor.to_run { ActiveSupport::CurrentAttributes.reset_all }
      app.executor.to_complete { ActiveSupport::CurrentAttributes.reset_all }
    end

    initializer "active_support.deprecation_behavior" do |app|
      if deprecation = app.config.active_support.deprecation
        ActiveSupport::Deprecation.behavior = deprecation
      end
    end

    # Sets the default value for Time.zone
    # If assigned value cannot be matched to a TimeZone, an exception will be raised.
    initializer "active_support.initialize_time_zone" do |app|
      begin
        # Probe for tzinfo data up front so the failure message is actionable.
        TZInfo::DataSource.get
      rescue TZInfo::DataSourceNotFound => e
        raise e.exception "tzinfo-data is not present. Please add gem 'tzinfo-data' to your Gemfile and run bundle install"
      end
      require "active_support/core_ext/time/zones"
      Time.zone_default = Time.find_zone!(app.config.time_zone)
    end

    # Sets the default week start
    # If assigned value is not a valid day symbol (e.g. :sunday, :monday, ...), an exception will be raised.
    initializer "active_support.initialize_beginning_of_week" do |app|
      require "active_support/core_ext/date/calculations"
      beginning_of_week_default = Date.find_beginning_of_week!(app.config.beginning_of_week)

      Date.beginning_of_week_default = beginning_of_week_default
    end

    initializer "active_support.require_master_key" do |app|
      if app.config.respond_to?(:require_master_key) && app.config.require_master_key
        begin
          app.credentials.key
        rescue ActiveSupport::EncryptedFile::MissingKeyError => error
          # Abort boot rather than run without the credentials key.
          $stderr.puts error.message
          exit 1
        end
      end
    end

    initializer "active_support.set_configs" do |app|
      # Forward every config.active_support entry to the matching
      # ActiveSupport writer, skipping keys with no corresponding setter.
      app.config.active_support.each do |k, v|
        k = "#{k}="
        ActiveSupport.send(k, v) if ActiveSupport.respond_to? k
      end
    end

    initializer "active_support.set_hash_digest_class" do |app|
      config.after_initialize do
        if app.config.active_support.use_sha1_digests
          ActiveSupport::Digest.hash_digest_class = ::Digest::SHA1
        end
      end
    end
  end
end
|
@ -0,0 +1,131 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/execution_wrapper"
|
||||
|
||||
module ActiveSupport
  #--
  # This class defines several callbacks:
  #
  #   to_prepare -- Run once at application startup, and also from
  #   +to_run+.
  #
  #   to_run -- Run before a work run that is reloading. If
  #   +reload_classes_only_on_change+ is true (the default), the class
  #   unload will have already occurred.
  #
  #   to_complete -- Run after a work run that has reloaded. If
  #   +reload_classes_only_on_change+ is false, the class unload will
  #   have occurred after the work run, but before this callback.
  #
  #   before_class_unload -- Run immediately before the classes are
  #   unloaded.
  #
  #   after_class_unload -- Run immediately after the classes are
  #   unloaded.
  #
  class Reloader < ExecutionWrapper
    define_callbacks :prepare

    define_callbacks :class_unload

    # Registers a callback that will run once at application startup and every time the code is reloaded.
    def self.to_prepare(*args, &block)
      set_callback(:prepare, *args, &block)
    end

    # Registers a callback that will run immediately before the classes are unloaded.
    def self.before_class_unload(*args, &block)
      set_callback(:class_unload, *args, &block)
    end

    # Registers a callback that will run immediately after the classes are unloaded.
    def self.after_class_unload(*args, &block)
      set_callback(:class_unload, :after, *args, &block)
    end

    # Fire :prepare after every work run that actually reloaded.
    to_run(:after) { self.class.prepare! }

    # Initiate a manual reload
    def self.reload!
      executor.wrap do
        new.tap do |instance|
          begin
            instance.run!
          ensure
            instance.complete!
          end
        end
      end
      prepare!
    end

    # Run only when a reload is pending; otherwise hand back the no-op
    # Null wrapper (inherited from ExecutionWrapper).
    def self.run! # :nodoc:
      if check!
        super
      else
        Null
      end
    end

    # Run the supplied block as a work unit, reloading code as needed
    def self.wrap
      executor.wrap do
        super
      end
    end

    class_attribute :executor, default: Executor
    class_attribute :check, default: lambda { false }

    # Memoizes the pending-reload flag so +check+ runs at most once per
    # cycle; reset by +reloaded!+.
    def self.check! # :nodoc:
      @should_reload ||= check.call
    end

    def self.reloaded! # :nodoc:
      @should_reload = false
    end

    def self.prepare! # :nodoc:
      new.run_callbacks(:prepare)
    end

    def initialize
      super
      # Tracks whether this instance currently holds the interlock's unload lock.
      @locked = false
    end

    # Acquire the ActiveSupport::Dependencies::Interlock unload lock,
    # ensuring it will be released automatically
    def require_unload_lock!
      unless @locked
        ActiveSupport::Dependencies.interlock.start_unloading
        @locked = true
      end
    end

    # Release the unload lock if it has been previously obtained
    def release_unload_lock!
      if @locked
        @locked = false
        ActiveSupport::Dependencies.interlock.done_unloading
      end
    end

    def run! # :nodoc:
      super
      release_unload_lock!
    end

    def class_unload!(&block) # :nodoc:
      require_unload_lock!
      run_callbacks(:class_unload, &block)
    end

    def complete! # :nodoc:
      super
      self.class.reloaded!
    ensure
      # Never leave the unload lock held, even if callbacks raise.
      release_unload_lock!
    end
  end
end
|
@ -0,0 +1,174 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/concern"
|
||||
require "active_support/core_ext/class/attribute"
|
||||
require "active_support/core_ext/string/inflections"
|
||||
|
||||
module ActiveSupport
  # Rescuable module adds support for easier exception handling.
  module Rescuable
    extend Concern

    included do
      # [[class_or_name, handler], ...] — appended on declaration, read in
      # reverse so the most recent declaration wins.
      class_attribute :rescue_handlers, default: []
    end

    module ClassMethods
      # Rescue exceptions raised in controller actions.
      #
      # <tt>rescue_from</tt> receives a series of exception classes or class
      # names, and a trailing <tt>:with</tt> option with the name of a method
      # or a Proc object to be called to handle them. Alternatively a block can
      # be given.
      #
      # Handlers that take one argument will be called with the exception, so
      # that the exception can be inspected when dealing with it.
      #
      # Handlers are inherited. They are searched from right to left, from
      # bottom to top, and up the hierarchy. The handler of the first class for
      # which <tt>exception.is_a?(klass)</tt> holds true is the one invoked, if
      # any.
      #
      #   class ApplicationController < ActionController::Base
      #     rescue_from User::NotAuthorized, with: :deny_access # self defined exception
      #     rescue_from ActiveRecord::RecordInvalid, with: :show_errors
      #
      #     rescue_from 'MyAppError::Base' do |exception|
      #       render xml: exception, status: 500
      #     end
      #
      #     private
      #       def deny_access
      #         ...
      #       end
      #
      #       def show_errors(exception)
      #         exception.record.new_record? ? ...
      #       end
      #   end
      #
      # Exceptions raised inside exception handlers are not propagated up.
      def rescue_from(*klasses, with: nil, &block)
        unless with
          if block_given?
            with = block
          else
            raise ArgumentError, "Need a handler. Pass the with: keyword argument or provide a block."
          end
        end

        klasses.each do |klass|
          # Store class handlers by name so reloading a class doesn't strand
          # a stale constant in the handler list.
          key = if klass.is_a?(Module) && klass.respond_to?(:===)
            klass.name
          elsif klass.is_a?(String)
            klass
          else
            raise ArgumentError, "#{klass.inspect} must be an Exception class or a String referencing an Exception class"
          end

          # Put the new handler at the end because the list is read in reverse.
          self.rescue_handlers += [[key, with]]
        end
      end

      # Matches an exception to a handler based on the exception class.
      #
      # If no handler matches the exception, check for a handler matching the
      # (optional) exception.cause. If no handler matches the exception or its
      # cause, this returns +nil+, so you can deal with unhandled exceptions.
      # Be sure to re-raise unhandled exceptions if this is what you expect.
      #
      #   begin
      #     …
      #   rescue => exception
      #     rescue_with_handler(exception) || raise
      #   end
      #
      # Returns the exception if it was handled and +nil+ if it was not.
      def rescue_with_handler(exception, object: self, visited_exceptions: [])
        # Track visited exceptions to avoid looping forever on cyclic causes.
        visited_exceptions << exception

        if handler = handler_for_rescue(exception, object: object)
          handler.call exception
          exception
        elsif exception
          if visited_exceptions.include?(exception.cause)
            nil
          else
            # Walk down the cause chain looking for a handler.
            rescue_with_handler(exception.cause, object: object, visited_exceptions: visited_exceptions)
          end
        end
      end

      # Normalizes the matched handler into a one-argument callable: symbols
      # bind to +object+'s methods, procs run via instance_exec on +object+.
      def handler_for_rescue(exception, object: self) #:nodoc:
        case rescuer = find_rescue_handler(exception)
        when Symbol
          method = object.method(rescuer)
          if method.arity == 0
            -> e { method.call }
          else
            method
          end
        when Proc
          if rescuer.arity == 0
            -> e { object.instance_exec(&rescuer) }
          else
            -> e { object.instance_exec(e, &rescuer) }
          end
        end
      end

      private
        def find_rescue_handler(exception)
          if exception
            # Handlers are in order of declaration but the most recently declared
            # is the highest priority match, so we search for matching handlers
            # in reverse.
            _, handler = rescue_handlers.reverse_each.detect do |class_or_name, _|
              if klass = constantize_rescue_handler_class(class_or_name)
                klass === exception
              end
            end

            handler
          end
        end

        def constantize_rescue_handler_class(class_or_name)
          case class_or_name
          when String, Symbol
            begin
              # Try a lexical lookup first since we support
              #
              #   class Super
              #     rescue_from 'Error', with: …
              #   end
              #
              #   class Sub
              #     class Error < StandardError; end
              #   end
              #
              # so an Error raised in Sub will hit the 'Error' handler.
              const_get class_or_name
            rescue NameError
              class_or_name.safe_constantize
            end
          else
            class_or_name
          end
        end
    end

    # Delegates to the class method, but uses the instance as the subject for
    # rescue_from handlers (method calls, instance_exec blocks).
    def rescue_with_handler(exception)
      self.class.rescue_with_handler exception, object: self
    end

    # Internal handler lookup. Delegates to class method. Some libraries call
    # this directly, so keeping it around for compatibility.
    def handler_for_rescue(exception) #:nodoc:
      self.class.handler_for_rescue exception, object: self
    end
  end
end
|
@ -0,0 +1,31 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "digest/sha2"
|
||||
|
||||
module ActiveSupport
  module SecurityUtils
    # Constant time string comparison, for fixed length strings.
    #
    # The values compared should be of fixed length, such as strings
    # that have already been processed by HMAC. Raises in case of length mismatch.
    def fixed_length_secure_compare(a, b)
      raise ArgumentError, "string length mismatch." unless a.bytesize == b.bytesize

      left_bytes = a.unpack("C#{a.bytesize}")

      # OR every XOR difference together so runtime does not depend on where
      # (or whether) the strings differ.
      diff = 0
      b.each_byte do |right_byte|
        diff |= right_byte ^ left_bytes.shift
      end
      diff.zero?
    end
    module_function :fixed_length_secure_compare

    # Constant time string comparison, for variable length strings.
    #
    # The values are first processed by SHA256, so that we don't leak length info
    # via timing attacks.
    def secure_compare(a, b)
      digests_equal = fixed_length_secure_compare(::Digest::SHA256.hexdigest(a), ::Digest::SHA256.hexdigest(b))
      digests_equal && a == b
    end
    module_function :secure_compare
  end
end
|
@ -0,0 +1,34 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
module ActiveSupport
  # Wrapping a string in this class gives you a prettier way to test
  # for equality. The value returned by <tt>Rails.env</tt> is wrapped
  # in a StringInquirer object, so instead of calling this:
  #
  #   Rails.env == 'production'
  #
  # you can call this:
  #
  #   Rails.env.production?
  #
  # == Instantiating a new StringInquirer
  #
  #   vehicle = ActiveSupport::StringInquirer.new('car')
  #   vehicle.car?  # => true
  #   vehicle.bike? # => false
  class StringInquirer < String
    private

      # Any predicate-style message (ending in "?") is answerable here.
      def respond_to_missing?(method_name, include_private = false)
        method_name.to_s.end_with?("?") || super
      end

      # foo? compares the wrapped string against "foo"; anything else is a
      # genuine NoMethodError.
      def method_missing(method_name, *arguments)
        name = method_name.to_s
        if name.end_with?("?")
          self == name[0..-2]
        else
          super
        end
      end
  end
end
|
@ -0,0 +1,130 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/per_thread_registry"
|
||||
require "active_support/notifications"
|
||||
|
||||
module ActiveSupport
  # ActiveSupport::Subscriber is an object set to consume
  # ActiveSupport::Notifications. The subscriber dispatches notifications to
  # a registered object based on its given namespace.
  #
  # An example would be an Active Record subscriber responsible for collecting
  # statistics about queries:
  #
  #   module ActiveRecord
  #     class StatsSubscriber < ActiveSupport::Subscriber
  #       attach_to :active_record
  #
  #       def sql(event)
  #         Statsd.timing("sql.#{event.payload[:name]}", event.duration)
  #       end
  #     end
  #   end
  #
  # After configured, whenever a "sql.active_record" notification is published,
  # it will properly dispatch the event (ActiveSupport::Notifications::Event) to
  # the +sql+ method.
  class Subscriber
    class << self
      # Attach the subscriber to a namespace.
      def attach_to(namespace, subscriber = new, notifier = ActiveSupport::Notifications)
        @namespace = namespace
        @subscriber = subscriber
        @notifier = notifier

        subscribers << subscriber

        # Add event subscribers for all existing methods on the class.
        subscriber.public_methods(false).each do |event|
          add_event_subscriber(event)
        end
      end

      # Adds event subscribers for all new methods added to the class.
      def method_added(event)
        # Only public methods are added as subscribers, and only if a notifier
        # has been set up. This means that subscribers will only be set up for
        # classes that call #attach_to.
        if public_method_defined?(event) && notifier
          add_event_subscriber(event)
        end
      end

      # NOTE: @@subscribers is deliberately shared across the whole
      # Subscriber hierarchy — one global list of attached instances.
      def subscribers
        @@subscribers ||= []
      end

      # TODO Change this to private once we've dropped Ruby 2.2 support.
      # Workaround for Ruby 2.2 "private attribute?" warning.
      protected

        attr_reader :subscriber, :notifier, :namespace

      private

        def add_event_subscriber(event) # :doc:
          # "start"/"finish" are the notification hooks themselves, not events.
          return if %w{ start finish }.include?(event.to_s)

          pattern = "#{event}.#{namespace}"

          # Don't add multiple subscribers (eg. if methods are redefined).
          return if subscriber.patterns.include?(pattern)

          subscriber.patterns << pattern
          notifier.subscribe(pattern, subscriber)
        end
    end

    attr_reader :patterns # :nodoc:

    def initialize
      # Unique per-instance key into the per-thread event-stack registry.
      @queue_key = [self.class.name, object_id].join "-"
      @patterns = []
      super
    end

    # Notifications start hook: open an event and nest it under any
    # currently running parent event.
    def start(name, id, payload)
      e = ActiveSupport::Notifications::Event.new(name, now, nil, id, payload)
      parent = event_stack.last
      parent << e if parent

      event_stack.push e
    end

    # Notifications finish hook: close the innermost event and dispatch it
    # to the method named after the event ("sql.active_record" -> #sql).
    def finish(name, id, payload)
      finished = now
      event = event_stack.pop
      event.end = finished
      event.payload.merge!(payload)

      method = name.split(".".freeze).first
      send(method, event)
    end

    private

      def event_stack
        SubscriberQueueRegistry.instance.get_queue(@queue_key)
      end

      # Monotonic clock keeps durations immune to wall-clock adjustments.
      def now
        Process.clock_gettime(Process::CLOCK_MONOTONIC)
      end
  end

  # This is a registry for all the event stacks kept for subscribers.
  #
  # See the documentation of <tt>ActiveSupport::PerThreadRegistry</tt>
  # for further details.
  class SubscriberQueueRegistry # :nodoc:
    extend PerThreadRegistry

    def initialize
      @registry = {}
    end

    def get_queue(queue_key)
      @registry[queue_key] ||= []
    end
  end
end
|
@ -0,0 +1,79 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "active_support/core_ext/module/delegation"
|
||||
require "active_support/core_ext/object/blank"
|
||||
require "logger"
|
||||
require "active_support/logger"
|
||||
|
||||
module ActiveSupport
  # Wraps any standard Logger object to provide tagging capabilities.
  #
  #   logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT))
  #   logger.tagged('BCX') { logger.info 'Stuff' }                            # Logs "[BCX] Stuff"
  #   logger.tagged('BCX', "Jason") { logger.info 'Stuff' }                   # Logs "[BCX] [Jason] Stuff"
  #   logger.tagged('BCX') { logger.tagged('Jason') { logger.info 'Stuff' } } # Logs "[BCX] [Jason] Stuff"
  #
  # This is used by the default Rails.logger as configured by Railties to make
  # it easy to stamp log lines with subdomains, request ids, and anything else
  # to aid debugging of multi-user production applications.
  module TaggedLogging
    module Formatter # :nodoc:
      # This method is invoked when a log event occurs.
      def call(severity, timestamp, progname, msg)
        super(severity, timestamp, progname, "#{tags_text}#{msg}")
      end

      # Yields with the given tags pushed, guaranteeing they are popped
      # afterwards even if the block raises.
      def tagged(*tags)
        new_tags = push_tags(*tags)
        yield self
      ensure
        pop_tags(new_tags.size)
      end

      # Returns only the tags actually added (blank entries are dropped),
      # so a matching pop removes exactly those.
      def push_tags(*tags)
        tags.flatten.reject(&:blank?).tap do |new_tags|
          current_tags.concat new_tags
        end
      end

      def pop_tags(size = 1)
        current_tags.pop size
      end

      def clear_tags!
        current_tags.clear
      end

      # Tags are kept per thread AND per formatter instance.
      def current_tags
        # We use our object ID here to avoid conflicting with other instances
        thread_key = @thread_key ||= "activesupport_tagged_logging_tags:#{object_id}".freeze
        Thread.current[thread_key] ||= []
      end

      # "[a] [b] " prefix, or nil when no tags are set.
      def tags_text
        tags = current_tags
        if tags.any?
          tags.collect { |tag| "[#{tag}] " }.join
        end
      end
    end

    def self.new(logger)
      # Ensure we set a default formatter so we aren't extending nil!
      logger.formatter ||= ActiveSupport::Logger::SimpleFormatter.new
      logger.formatter.extend Formatter
      logger.extend(self)
    end

    delegate :push_tags, :pop_tags, :clear_tags!, to: :formatter

    def tagged(*tags)
      formatter.tagged(*tags) { yield self }
    end

    def flush
      clear_tags!
      super if defined?(super)
    end
  end
end
|
@ -0,0 +1,72 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
gem "minitest" # make sure we get the gem, not stdlib
|
||||
require "minitest"
|
||||
require "active_support/testing/tagged_logging"
|
||||
require "active_support/testing/setup_and_teardown"
|
||||
require "active_support/testing/assertions"
|
||||
require "active_support/testing/deprecation"
|
||||
require "active_support/testing/declarative"
|
||||
require "active_support/testing/isolation"
|
||||
require "active_support/testing/constant_lookup"
|
||||
require "active_support/testing/time_helpers"
|
||||
require "active_support/testing/file_fixtures"
|
||||
|
||||
module ActiveSupport
  # Base test case: Minitest plus Active Support's testing extensions
  # (tagged logging, time helpers, deprecation assertions, etc.).
  class TestCase < ::Minitest::Test
    # Aliased so test/unit-era rescue clauses keep working.
    Assertion = Minitest::Assertion

    class << self
      # Sets the order in which test cases are run.
      #
      #   ActiveSupport::TestCase.test_order = :random # => :random
      #
      # Valid values are:
      # * +:random+   (to run tests in random order)
      # * +:parallel+ (to run tests in parallel)
      # * +:sorted+   (to run tests alphabetically by method name)
      # * +:alpha+    (equivalent to +:sorted+)
      def test_order=(new_order)
        ActiveSupport.test_order = new_order
      end

      # Returns the order in which test cases are run.
      #
      #   ActiveSupport::TestCase.test_order # => :random
      #
      # Possible values are +:random+, +:parallel+, +:alpha+, +:sorted+.
      # Defaults to +:random+.
      def test_order
        ActiveSupport.test_order ||= :random
      end
    end

    alias_method :method_name, :name

    include ActiveSupport::Testing::TaggedLogging
    prepend ActiveSupport::Testing::SetupAndTeardown
    include ActiveSupport::Testing::Assertions
    include ActiveSupport::Testing::Deprecation
    include ActiveSupport::Testing::TimeHelpers
    include ActiveSupport::Testing::FileFixtures
    extend ActiveSupport::Testing::Declarative

    # test/unit backwards compatibility methods
    alias :assert_raise :assert_raises
    alias :assert_not_empty :refute_empty
    alias :assert_not_equal :refute_equal
    alias :assert_not_in_delta :refute_in_delta
    alias :assert_not_in_epsilon :refute_in_epsilon
    alias :assert_not_includes :refute_includes
    alias :assert_not_instance_of :refute_instance_of
    alias :assert_not_kind_of :refute_kind_of
    alias :assert_no_match :refute_match
    alias :assert_not_nil :refute_nil
    alias :assert_not_operator :refute_operator
    alias :assert_not_predicate :refute_predicate
    alias :assert_not_respond_to :refute_respond_to
    alias :assert_not_same :refute_same

    # Allow applications/gems to extend the base test case at load time.
    ActiveSupport.run_load_hooks(:active_support_test_case, self)
  end
end
|
Binary file not shown.
@ -0,0 +1,10 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require_relative "gem_version"
|
||||
|
||||
module ActiveSupport
  # Returns the version of the currently loaded ActiveSupport as a <tt>Gem::Version</tt>
  # (delegates to +gem_version+ defined in gem_version.rb).
  def self.version
    gem_version
  end
end
|
@ -0,0 +1,183 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
raise "JRuby is required to use the JDOM backend for XmlMini" unless RUBY_PLATFORM.include?("java")
|
||||
|
||||
require "jruby"
|
||||
include Java
|
||||
|
||||
require "active_support/core_ext/object/blank"
|
||||
|
||||
java_import javax.xml.parsers.DocumentBuilder unless defined? DocumentBuilder
|
||||
java_import javax.xml.parsers.DocumentBuilderFactory unless defined? DocumentBuilderFactory
|
||||
java_import java.io.StringReader unless defined? StringReader
|
||||
java_import org.xml.sax.InputSource unless defined? InputSource
|
||||
java_import org.xml.sax.Attributes unless defined? Attributes
|
||||
java_import org.w3c.dom.Node unless defined? Node
|
||||
|
||||
module ActiveSupport
  # XmlMini backend backed by the JDK's DOM parser. JRuby only.
  module XmlMini_JDOM #:nodoc:
    extend self

    CONTENT_KEY = "__content__".freeze

    NODE_TYPE_NAMES = %w{ATTRIBUTE_NODE CDATA_SECTION_NODE COMMENT_NODE DOCUMENT_FRAGMENT_NODE
      DOCUMENT_NODE DOCUMENT_TYPE_NODE ELEMENT_NODE ENTITY_NODE ENTITY_REFERENCE_NODE NOTATION_NODE
      PROCESSING_INSTRUCTION_NODE TEXT_NODE}

    node_type_map = {}
    NODE_TYPE_NAMES.each { |type| node_type_map[Node.send(type)] = type }

    # Parse an XML Document string or IO into a simple hash using Java's jdom.
    # data::
    #   XML Document string or IO to parse
    def parse(data)
      if data.respond_to?(:read)
        data = data.read
      end

      if data.blank?
        {}
      else
        @dbf = DocumentBuilderFactory.new_instance
        # secure processing of java xml
        # https://archive.is/9xcQQ
        @dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false)
        @dbf.setFeature("http://xml.org/sax/features/external-general-entities", false)
        @dbf.setFeature("http://xml.org/sax/features/external-parameter-entities", false)
        @dbf.setFeature(javax.xml.XMLConstants::FEATURE_SECURE_PROCESSING, true)
        xml_string_reader = StringReader.new(data)
        xml_input_source = InputSource.new(xml_string_reader)
        doc = @dbf.new_document_builder.parse(xml_input_source)
        merge_element!({ CONTENT_KEY => "" }, doc.document_element, XmlMini.depth)
      end
    end

    private

      # Convert an XML element and merge into the hash
      #
      # hash::
      #   Hash to merge the converted element into.
      # element::
      #   XML element to merge into hash
      def merge_element!(hash, element, depth)
        # depth is the remaining recursion budget (starts at XmlMini.depth).
        raise "Document too deep!" if depth == 0
        delete_empty(hash)
        merge!(hash, element.tag_name, collapse(element, depth))
      end

      def delete_empty(hash)
        hash.delete(CONTENT_KEY) if hash[CONTENT_KEY] == ""
      end

      # Actually converts an XML document element into a data structure.
      #
      # element::
      #   The document element to be collapsed.
      def collapse(element, depth)
        hash = get_attributes(element)

        child_nodes = element.child_nodes
        if child_nodes.length > 0
          (0...child_nodes.length).each do |i|
            child = child_nodes.item(i)
            merge_element!(hash, child, depth - 1) unless child.node_type == Node.TEXT_NODE
          end
          merge_texts!(hash, element) unless empty_content?(element)
          hash
        else
          merge_texts!(hash, element)
        end
      end

      # Merge all the texts of an element into the hash
      #
      # hash::
      #   Hash to add the converted element to.
      # element::
      #   XML element whose texts are to me merged into the hash
      def merge_texts!(hash, element)
        delete_empty(hash)
        text_children = texts(element)
        if text_children.join.empty?
          hash
        else
          # must use value to prevent double-escaping
          merge!(hash, CONTENT_KEY, text_children.join)
        end
      end

      # Adds a new key/value pair to an existing Hash. If the key to be added
      # already exists and the existing value associated with key is not
      # an Array, it will be wrapped in an Array. Then the new value is
      # appended to that Array.
      #
      # hash::
      #   Hash to add key/value pair to.
      # key::
      #   Key to be added.
      # value::
      #   Value to be associated with key.
      def merge!(hash, key, value)
        if hash.has_key?(key)
          if hash[key].instance_of?(Array)
            hash[key] << value
          else
            hash[key] = [hash[key], value]
          end
        elsif value.instance_of?(Array)
          hash[key] = [value]
        else
          hash[key] = value
        end
        hash
      end

      # Converts the attributes array of an XML element into a hash.
      # Returns an empty Hash if node has no attributes.
      #
      # element::
      #   XML element to extract attributes from.
      def get_attributes(element)
        attribute_hash = {}
        attributes = element.attributes
        (0...attributes.length).each do |i|
          # Seed the content key whenever attributes exist; delete_empty
          # removes it again if no text content ever arrives.
          attribute_hash[CONTENT_KEY] ||= ""
          attribute_hash[attributes.item(i).name] = attributes.item(i).value
        end
        attribute_hash
      end

      # Collects the text of all direct TEXT_NODE children of an element.
      #
      # element::
      #   XML element to be checked.
      def texts(element)
        texts = []
        child_nodes = element.child_nodes
        (0...child_nodes.length).each do |i|
          item = child_nodes.item(i)
          if item.node_type == Node.TEXT_NODE
            texts << item.get_data
          end
        end
        texts
      end

      # Determines if a document element has text content
      #
      # element::
      #   XML element to be checked.
      def empty_content?(element)
        text = "".dup
        child_nodes = element.child_nodes
        (0...child_nodes.length).each do |i|
          item = child_nodes.item(i)
          if item.node_type == Node.TEXT_NODE
            text << item.get_data.strip
          end
        end
        text.strip.length == 0
      end
  end
end
|
@ -0,0 +1,80 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "libxml"
|
||||
require "active_support/core_ext/object/blank"
|
||||
require "stringio"
|
||||
|
||||
module ActiveSupport
  module XmlMini_LibXML #:nodoc:
    extend self

    # Parse an XML Document string or IO into a simple hash using libxml.
    # data::
    #   XML Document string or IO to parse
    def parse(data)
      io = data.respond_to?(:read) ? data : StringIO.new(data || "")
      return {} if io.eof?

      LibXML::XML::Parser.io(io).parse.to_hash
    end
  end
end
|
||||
|
||||
# Hash-conversion mixins for libxml documents and nodes, producing the same
# hash shape as the other XmlMini backends.
module LibXML #:nodoc:
  module Conversions #:nodoc:
    module Document #:nodoc:
      # Convert the whole document by converting its root element.
      def to_hash
        root.to_hash
      end
    end

    module Node #:nodoc:
      # Key under which an element's text/CDATA content is stored.
      CONTENT_ROOT = "__content__".freeze

      # Convert XML document to hash.
      #
      # hash::
      #   Hash to merge the converted element into.
      def to_hash(hash = {})
        node_hash = {}

        # Insert node hash into parent hash correctly: repeated sibling
        # elements with the same name are collapsed into an Array.
        case hash[name]
        when Array then hash[name] << node_hash
        when Hash then hash[name] = [hash[name], node_hash]
        when nil then hash[name] = node_hash
        end

        # Handle child elements: recurse for elements, accumulate text/CDATA.
        each_child do |c|
          if c.element?
            c.to_hash(node_hash)
          elsif c.text? || c.cdata?
            node_hash[CONTENT_ROOT] ||= "".dup
            node_hash[CONTENT_ROOT] << c.content
          end
        end

        # Remove content node if it is blank and there are child entries
        # (blank? comes from active_support/core_ext/object/blank).
        if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank?
          node_hash.delete(CONTENT_ROOT)
        end

        # Handle attributes (an attribute may overwrite a same-named child).
        each_attr { |a| node_hash[a.name] = a.value }

        hash
      end
    end
  end
end

# :enddoc:

LibXML::XML::Document.include(LibXML::Conversions::Document)
LibXML::XML::Node.include(LibXML::Conversions::Node)
|
@ -0,0 +1,83 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "libxml"
|
||||
require "active_support/core_ext/object/blank"
|
||||
require "stringio"
|
||||
|
||||
module ActiveSupport
|
||||
module XmlMini_LibXMLSAX #:nodoc:
|
||||
extend self
|
||||
|
||||
# Class that will build the hash while the XML document
# is being parsed using SAX events.
class HashBuilder
  include LibXML::XML::SaxParser::Callbacks

  # Key under which accumulated text/CDATA content is stored.
  CONTENT_KEY = "__content__".freeze
  # Bookkeeping entry recording how many keys an element's hash had before
  # any children were added; used in on_end_element to detect sub-elements.
  HASH_SIZE_KEY = "__hash_size__".freeze

  # The fully built hash; valid once parsing has finished.
  attr_reader :hash

  # Hash for the element currently open (top of the element stack).
  def current_hash
    @hash_stack.last
  end

  def on_start_document
    @hash = { CONTENT_KEY => "".dup }
    @hash_stack = [@hash]
  end

  def on_end_document
    @hash = @hash_stack.pop
    @hash.delete(CONTENT_KEY)
  end

  def on_start_element(name, attrs = {})
    new_hash = { CONTENT_KEY => "".dup }.merge!(attrs)
    # +1 accounts for the HASH_SIZE_KEY entry itself.
    new_hash[HASH_SIZE_KEY] = new_hash.size + 1

    # Repeated sibling elements with the same name collapse into an Array.
    case current_hash[name]
    when Array then current_hash[name] << new_hash
    when Hash then current_hash[name] = [current_hash[name], new_hash]
    when nil then current_hash[name] = new_hash
    end

    @hash_stack.push(new_hash)
  end

  def on_end_element(name)
    # Drop the content entry when the element gained children and the text is
    # blank, or when there was no text at all (blank? is an ActiveSupport
    # core extension).
    if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == ""
      current_hash.delete(CONTENT_KEY)
    end
    @hash_stack.pop
  end

  def on_characters(string)
    current_hash[CONTENT_KEY] << string
  end

  # CDATA is treated exactly like character data.
  alias_method :on_cdata_block, :on_characters
end
|
||||
|
||||
attr_accessor :document_class
|
||||
self.document_class = HashBuilder
|
||||
|
||||
# Parse an XML Document string or IO into a simple hash using libxml's
# SAX parser and the configured document_class builder.
def parse(data)
  io = data.respond_to?(:read) ? data : StringIO.new(data || "")
  return {} if io.eof?

  # Silence libxml's default error output; errors surface via callbacks.
  LibXML::XML::Error.set_handler(&LibXML::XML::Error::QUIET_HANDLER)
  sax_parser = LibXML::XML::SaxParser.io(io)
  builder = document_class.new

  sax_parser.callbacks = builder
  sax_parser.parse
  builder.hash
end
|
||||
end
|
||||
end
|
@ -0,0 +1,83 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
begin
|
||||
require "nokogiri"
|
||||
rescue LoadError => e
|
||||
$stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install"
|
||||
raise e
|
||||
end
|
||||
require "active_support/core_ext/object/blank"
|
||||
require "stringio"
|
||||
|
||||
module ActiveSupport
|
||||
module XmlMini_Nokogiri #:nodoc:
|
||||
extend self
|
||||
|
||||
# Parse an XML Document string or IO into a simple hash using libxml / nokogiri.
|
||||
# data::
|
||||
# XML Document string or IO to parse
|
||||
# Parse an XML Document string or IO into a simple hash using Nokogiri.
# data::
#   XML Document string or IO to parse
def parse(data)
  io = data.respond_to?(:read) ? data : StringIO.new(data || "")
  return {} if io.eof?

  doc = Nokogiri::XML(io)
  # Surface parse errors instead of returning a partial hash.
  raise doc.errors.first unless doc.errors.empty?
  doc.to_hash
end
|
||||
|
||||
# Hash-conversion mixins for Nokogiri documents and nodes, producing the
# same hash shape as the other XmlMini backends.
module Conversions #:nodoc:
  module Document #:nodoc:
    # Convert the whole document by converting its root element.
    def to_hash
      root.to_hash
    end
  end

  module Node #:nodoc:
    # Key under which an element's text/CDATA content is stored.
    CONTENT_ROOT = "__content__".freeze

    # Convert XML document to hash.
    #
    # hash::
    #   Hash to merge the converted element into.
    def to_hash(hash = {})
      node_hash = {}

      # Insert node hash into parent hash correctly: repeated sibling
      # elements with the same name are collapsed into an Array.
      case hash[name]
      when Array then hash[name] << node_hash
      when Hash then hash[name] = [hash[name], node_hash]
      when nil then hash[name] = node_hash
      end

      # Handle child elements: recurse for elements, accumulate text/CDATA.
      children.each do |c|
        if c.element?
          c.to_hash(node_hash)
        elsif c.text? || c.cdata?
          node_hash[CONTENT_ROOT] ||= "".dup
          node_hash[CONTENT_ROOT] << c.content
        end
      end

      # Remove content node if it is blank and there are child tags
      # (blank? comes from active_support/core_ext/object/blank).
      if node_hash.length > 1 && node_hash[CONTENT_ROOT].blank?
        node_hash.delete(CONTENT_ROOT)
      end

      # Handle attributes (an attribute may overwrite a same-named child).
      attribute_nodes.each { |a| node_hash[a.node_name] = a.value }

      hash
    end
  end
end

Nokogiri::XML::Document.include(Conversions::Document)
Nokogiri::XML::Node.include(Conversions::Node)
|
||||
end
|
||||
end
|
@ -0,0 +1,86 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
begin
|
||||
require "nokogiri"
|
||||
rescue LoadError => e
|
||||
$stderr.puts "You don't have nokogiri installed in your application. Please add it to your Gemfile and run bundle install"
|
||||
raise e
|
||||
end
|
||||
require "active_support/core_ext/object/blank"
|
||||
require "stringio"
|
||||
|
||||
module ActiveSupport
|
||||
module XmlMini_NokogiriSAX #:nodoc:
|
||||
extend self
|
||||
|
||||
# Class that will build the hash while the XML document
# is being parsed using SAX events.
class HashBuilder < Nokogiri::XML::SAX::Document
  # Key under which accumulated text/CDATA content is stored.
  CONTENT_KEY = "__content__".freeze
  # Bookkeeping entry recording how many keys an element's hash had before
  # any children were added; used in end_element to detect sub-elements.
  HASH_SIZE_KEY = "__hash_size__".freeze

  # The fully built hash; valid once parsing has finished.
  attr_reader :hash

  # Hash for the element currently open (top of the element stack).
  def current_hash
    @hash_stack.last
  end

  def start_document
    @hash = {}
    @hash_stack = [@hash]
  end

  # Sanity check: every start_element must have been matched by end_element.
  def end_document
    raise "Parse stack not empty!" if @hash_stack.size > 1
  end

  # Surface parser errors instead of silently ignoring them.
  def error(error_message)
    raise error_message
  end

  def start_element(name, attrs = [])
    # attrs arrives as an array of [name, value] pairs.
    new_hash = { CONTENT_KEY => "".dup }.merge!(Hash[attrs])
    # +1 accounts for the HASH_SIZE_KEY entry itself.
    new_hash[HASH_SIZE_KEY] = new_hash.size + 1

    # Repeated sibling elements with the same name collapse into an Array.
    case current_hash[name]
    when Array then current_hash[name] << new_hash
    when Hash then current_hash[name] = [current_hash[name], new_hash]
    when nil then current_hash[name] = new_hash
    end

    @hash_stack.push(new_hash)
  end

  def end_element(name)
    # Drop the content entry when the element gained children and the text
    # is blank, or when there was no text at all (blank? is an ActiveSupport
    # core extension).
    if current_hash.length > current_hash.delete(HASH_SIZE_KEY) && current_hash[CONTENT_KEY].blank? || current_hash[CONTENT_KEY] == ""
      current_hash.delete(CONTENT_KEY)
    end
    @hash_stack.pop
  end

  def characters(string)
    current_hash[CONTENT_KEY] << string
  end

  # CDATA is treated exactly like character data.
  alias_method :cdata_block, :characters
end
|
||||
|
||||
attr_accessor :document_class
|
||||
self.document_class = HashBuilder
|
||||
|
||||
# Parse an XML Document string or IO into a simple hash using Nokogiri's
# SAX parser and the configured document_class builder.
def parse(data)
  io = data.respond_to?(:read) ? data : StringIO.new(data || "")
  return {} if io.eof?

  builder = document_class.new
  Nokogiri::XML::SAX::Parser.new(builder).parse(io)
  builder.hash
end
|
||||
end
|
||||
end
|
3
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/backports-3.11.4/lib/backports.rb
vendored
Normal file
3
Library/Homebrew/vendor/bundle-standalone/ruby/2.3.0/gems/backports-3.11.4/lib/backports.rb
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
require "backports/version"
|
||||
require "backports/2.4"
|
||||
require "backports/rails"
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,33 @@
|
||||
# Backport of String#delete_suffix (Ruby 2.5) for older rubies.
unless String.method_defined? :delete_suffix
  require 'backports/tools/arguments'

  class String
    # Returns a copy of the string with +suffix+ removed when present,
    # otherwise a plain copy.
    def delete_suffix(suffix)
      suffix = Backports.coerce_to_str(suffix)
      size = suffix.length
      return dup unless size > 0 && index(suffix, -size)
      self[0...-size]
    end
  end
end
|
||||
|
||||
# Backport of String#delete_suffix! (Ruby 2.5) for older rubies.
unless String.method_defined? :delete_suffix!
  require 'backports/tools/arguments'

  class String
    # Removes +suffix+ in place; returns self, or nil when nothing changed.
    def delete_suffix!(suffix)
      suffix = Backports.coerce_to_str(suffix)
      # Trigger the frozen-string error via a real mutator, as MRI would.
      chomp! if frozen?
      size = suffix.length
      return nil unless size > 0 && index(suffix, -size)
      self[-size..-1] = ''
      self
    end
  end
end
|
@ -0,0 +1,3 @@
|
||||
require 'backports/tools/require_relative_dir'
|
||||
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,53 @@
|
||||
# Note: Must be required explicitly!
|
||||
# This is a best attempt to fake BasicObject in Ruby 1.8.x
|
||||
# What you do get:
|
||||
# * as few methods as the real BasicObject (at the moment the library is required...)
|
||||
# * BasicObject === <anything> # ==> returns true
|
||||
# What you don't get:
|
||||
# * BasicObject is not in the ancestor list of all classes and thus
|
||||
# * Comparisons between classes won't work, e.g.
|
||||
# Object < BasicObject # ==> returns true instead of false
|
||||
# * Instance methods added to Object or Kernel after you require 'backports/basic_object'
|
||||
# might also be available in instances of BasicObject and subclasses
|
||||
# (they will only be undefined whenever a subclass of BasicObject is created)
|
||||
# Because of all the fine print, BasicObject must be required explicitly
|
||||
|
||||
unless Object.const_defined? :BasicObject

  # Best-effort fake of Ruby 1.9's BasicObject for 1.8.x (see header above).
  class BasicObject
    # Instance methods that must survive the purge below; both String and
    # Symbol forms are kept since instance_methods returns Strings on 1.8.
    KEEP = %w[== equal? ! != instance_eval instance_exec __send__]
    KEEP.concat KEEP.map { |e| e.to_sym }

    # undefine almost all instance methods
    begin
      old_verbose, $VERBOSE = $VERBOSE, nil # silence the warning for undefining __id__
      (instance_methods - KEEP).each do |method|
        undef_method method
      end
    ensure
      $VERBOSE = old_verbose
    end

    class << self
      # A real BasicObject matches anything with ===.
      def === (cmp)
        true
      end

      # Let's try to keep things clean, in case methods have been added to Object
      # either directly or through an included module.
      # We'll do this whenever a class is derived from BasicObject
      # Ideally, we'd do this by trapping Object.method_added
      # and M.method_added for any module M included in Object or a submodule
      # Seems really tough to get right, but pull requests welcome ;-)
      def inherited(sub)
        BasicObject.class_eval do
          (instance_methods - KEEP).each do |method|
            # Undefine only when BasicObject still sees the very same
            # implementation Object has (i.e. it leaked in after the purge),
            # so methods defined directly on subclasses are left alone.
            if Object.method_defined?(method) && instance_method(method).owner == Object.instance_method(method).owner
              undef_method method
            end
          end
        end
      end
    end
  end
end
|
@ -0,0 +1,24 @@
|
||||
# Will intercept future and past 'require' calls of std_lib
|
||||
# and load additionally the updated libraries.
|
||||
require 'backports/tools/std_lib'
|
||||
require 'backports/tools/alias_method_chain'
|
||||
|
||||
module Kernel
  # Wrapped require: after (or instead of) loading +lib+, also requires any
  # backport extension files registered for it in
  # Backports::StdLib.extended_lib. Returns true/false like require.
  def require_with_backports(lib)
    begin
      # false means the lib was already loaded (extensions were applied then).
      return false unless require_without_backports(lib)
      paths = Backports::StdLib.extended_lib.fetch(lib, nil)
    rescue LoadError
      return false if Backports::StdLib::LoadedFeatures.new.include?(lib)
      # Swallow the error only when a backport can stand in for the lib;
      # then remember that we faked the load.
      raise unless paths = Backports::StdLib.extended_lib.fetch(lib, nil)
      Backports::StdLib::LoadedFeatures.mark_as_loaded(lib)
    end
    if paths
      paths.each do |path|
        require_without_backports(path)
      end
    end
    true
  end
  # Install the wrapper as Kernel#require.
  Backports.alias_method_chain self, :require, :backports
end
|
@ -0,0 +1,3 @@
|
||||
# Methods used internally by the backports.
|
||||
require 'backports/tools/require_relative_dir'
|
||||
Backports.require_relative_dir
|
@ -0,0 +1,8 @@
|
||||
module Backports
  # Safe alias_method: aliases +old_name+ to +new_name+ on +mod+ only when
  # the source method exists and the destination does not.
  def self.alias_method(mod, new_name, old_name)
    return unless mod.method_defined?(old_name) && !mod.method_defined?(new_name)
    mod.instance_eval do
      alias_method new_name, old_name
    end
  end
end
|
@ -0,0 +1,26 @@
|
||||
module Backports
  # Rails-style alias_method_chain applied to an arbitrary module, so
  # Module itself is not polluted.
  #
  # Renames +target+ to +target_without_feature+ and installs
  # +target_with_feature+ as the new +target+, preserving visibility.
  def self.alias_method_chain(mod, target, feature)
    mod.class_eval do
      # Strip out punctuation on predicates or bang methods since
      # e.g. target?_without_feature is not a valid method name.
      aliased_target = target.to_s.sub(/([?!=])$/, '')
      punctuation = Regexp.last_match(1)
      yield(aliased_target, punctuation) if block_given?

      with_method = "#{aliased_target}_with_#{feature}#{punctuation}"
      without_method = "#{aliased_target}_without_#{feature}#{punctuation}"

      alias_method without_method, target
      alias_method target, with_method

      # Carry the original method's visibility over to the new entry point.
      if public_method_defined?(without_method)
        public target
      elsif protected_method_defined?(without_method)
        protected target
      elsif private_method_defined?(without_method)
        private target
      end
    end
  end
end
|
@ -0,0 +1,12 @@
|
||||
module Backports
  # Sentinel object that reports itself as both less than and greater than
  # anything it is compared to.
  MOST_EXTREME_OBJECT_EVER = Object.new # :nodoc:
  class << MOST_EXTREME_OBJECT_EVER
    def <(_other)
      true
    end

    def >(_other)
      true
    end
  end
end
|
@ -0,0 +1,17 @@
|
||||
module Backports
  class << self
    # Reinterpret a Float's IEEE-754 bit pattern as an Integer.
    def float_to_integer(float)
      map_via_packing(float, 'D', 'q')
    end

    # Inverse of float_to_integer: Integer bit pattern back to a Float.
    def integer_to_float(int)
      map_via_packing(int, 'q', 'D')
    end

    private

    # Pack the magnitude with +pack+, reinterpret with +unpack+, then
    # restore the sign (sign is carried outside the bit pattern).
    def map_via_packing(nb, pack, unpack)
      mapped = [nb.abs].pack(pack).unpack(unpack).first
      nb < 0 ? -mapped : mapped
    end
  end
end
|
@ -0,0 +1,57 @@
|
||||
require 'backports/tools/arguments'
|
||||
module Backports
  # Used internally to combine {IO|File} options hash into mode (String or Integer)
  #
  # Returns the effective mode with binmode folded in (a "b" flag for String
  # modes, File::Constants::BINARY for Integer modes). Raises ArgumentError
  # when mode is given both positionally and in options, or when textmode
  # and binmode are both requested.
  def self.combine_mode_and_option(mode = nil, options = Backports::Undefined)
    # Can't backport autoclose, {internal|external|}encoding
    # Shift arguments when the first one is actually the options hash.
    mode, options = nil, mode if mode.respond_to?(:to_hash) && options == Backports::Undefined
    options = {} if options == nil || options == Backports::Undefined
    options = coerce_to_hash(options)
    if mode && options[:mode]
      raise ArgumentError, "mode specified twice"
    end
    mode ||= options[:mode] || "r"
    # Accept String or Integer modes; anything else passes through untouched.
    mode = try_convert(mode, String, :to_str) || try_convert(mode, Integer, :to_int) || mode
    if options[:textmode] || options[:binmode]
      text = options[:textmode] || (mode.is_a?(String) && mode =~ /t/)
      bin = options[:binmode] || (mode.is_a?(String) ? mode =~ /b/ : mode & File::Constants::BINARY != 0)
      if text && bin
        raise ArgumentError, "both textmode and binmode specified"
      end
      case
      when !options[:binmode]
        # textmode only: nothing extra to fold into the mode.
      when mode.is_a?(String)
        # Insert the "b" flag right after the main mode letter ("r" -> "rb").
        mode.insert(1, "b")
      else
        mode |= File::Constants::BINARY
      end
    end
    mode
  end

  # Used internally to combine {IO|File} options hash into mode (String or Integer) and perm
  #
  # Returns a two-element array [mode, perm]; perm may be nil.
  def self.combine_mode_perm_and_option(mode = nil, perm = Backports::Undefined, options = Backports::Undefined)
    # Shift arguments left when a leading argument is actually the options hash.
    mode, options = nil, mode if mode.respond_to?(:to_hash) && perm == Backports::Undefined
    perm, options = nil, perm if perm.respond_to?(:to_hash) && options == Backports::Undefined
    perm = nil if perm == Backports::Undefined
    options = {} if options == Backports::Undefined
    options = coerce_to_hash(options)
    if perm && options[:perm]
      raise ArgumentError, "perm specified twice"
    end
    [combine_mode_and_option(mode, options), perm || options[:perm]]
  end

  # Shared implementation behind IO.write / IO.binwrite backports.
  #
  # binary::  when true, put the file into binmode before writing.
  # offset::  nil means truncate-and-write ("w"); otherwise seek then write ("r+").
  def self.write(binary, filename, string, offset, options)
    offset, options = nil, offset if offset.respond_to?(:to_hash) && options == Backports::Undefined
    options = {} if options == Backports::Undefined
    options = coerce_to_hash(options)
    File.open(filename, 'a+'){} if offset # insure existence
    options = {:mode => offset.nil? ? "w" : "r+"}.merge(options)
    args = options[:open_args] || [options]
    File.open(filename, *Backports.combine_mode_perm_and_option(*args)) do |f|
      f.binmode if binary && f.respond_to?(:binmode)
      f.seek(offset) unless offset.nil?
      f.write(string)
    end
  end
end
|
@ -0,0 +1,43 @@
|
||||
require 'backports/tools/alias_method_chain'
|
||||
|
||||
module Backports
  # Metaprogramming utility to make block optional.
  # Tests first if block is already optional when given options.
  # Wrapped methods return an Enumerator when called without a block,
  # as modern rubies do.
  #
  # Options:
  #   :force   - skip the "already optional?" probe and always wrap
  #   :test_on - receiver used to probe current behavior (required unless :force)
  #   :arg     - arguments for the probe call (defaults to none)
  def self.make_block_optional(mod, *methods)
    # Accept a non-Module (e.g. an instance): operate on its singleton class.
    mod = class << mod; self; end unless mod.is_a? Module
    options = methods.last.is_a?(Hash) ? methods.pop : {}
    methods.each do |selector|
      unless mod.method_defined? selector
        warn "#{mod}##{selector} is not defined, so block can't be made optional"
        next
      end
      unless options[:force]
        # Check if needed: calling without a block either raises
        # LocalJumpError (old behavior) or yields an Enumerator
        # (block already optional; nothing to do).
        test_on = options.fetch(:test_on)
        result = begin
          test_on.send(selector, *options.fetch(:arg, []))
        rescue LocalJumpError
          false
        end
        next if result.class.name =~ /Enumerator/
      end
      require 'enumerator'
      # Rebuild an argument list matching the original arity so the generated
      # wrapper forwards every argument plus a trailing block.
      arity = mod.instance_method(selector).arity
      last_arg = []
      if arity < 0
        last_arg = ["*rest"]
        arity = -1-arity
      end
      arg_sequence = ((0...arity).map{|i| "arg_#{i}"} + last_arg + ["&block"]).join(", ")

      alias_method_chain(mod, selector, :optional_block) do |aliased_target, punctuation|
        mod.module_eval <<-end_eval, __FILE__, __LINE__ + 1
          def #{aliased_target}_with_optional_block#{punctuation}(#{arg_sequence})
            return to_enum(:#{aliased_target}_without_optional_block#{punctuation}, #{arg_sequence}) unless block_given?
            #{aliased_target}_without_optional_block#{punctuation}(#{arg_sequence})
          end
        end_eval
      end
    end
  end
end
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,55 @@
|
||||
require 'backports/tools/alias_method_chain'
|
||||
require 'backports/tools/arguments'
|
||||
|
||||
module Backports
  # Metaprogramming utility to convert the first file argument to path
  # (accepting IO objects and anything responding to to_path / to_str).
  def self.convert_first_argument_to_path(klass, selector)
    mod = class << klass; self; end
    unless mod.method_defined? selector
      warn "#{mod}##{selector} is not defined, so argument can't converted to path"
      return
    end
    # Rebuild an argument list matching the original arity so the generated
    # wrapper forwards every argument plus a trailing block.
    arity = mod.instance_method(selector).arity
    last_arg = []
    if arity < 0
      last_arg = ["*rest"]
      arity = -1-arity
    end
    arg_sequence = (["file"] + (1...arity).map{|i| "arg_#{i}"} + last_arg + ["&block"]).join(", ")

    alias_method_chain(mod, selector, :potential_path_argument) do |aliased_target, punctuation|
      mod.module_eval <<-end_eval, __FILE__, __LINE__ + 1
        def #{aliased_target}_with_potential_path_argument#{punctuation}(#{arg_sequence})
          file = Backports.convert_path(file)
          #{aliased_target}_without_potential_path_argument#{punctuation}(#{arg_sequence})
        end
      end_eval
    end
  end

  # Metaprogramming utility to convert all file arguments to paths,
  # leaving the first +skip+ positional arguments untouched.
  def self.convert_all_arguments_to_path(klass, selector, skip)
    mod = class << klass; self; end
    unless mod.method_defined? selector
      warn "#{mod}##{selector} is not defined, so arguments can't converted to path"
      return
    end
    first_args = (1..skip).map{|i| "arg_#{i}"}.join(",") + (skip > 0 ? "," : "")
    alias_method_chain(mod, selector, :potential_path_arguments) do |aliased_target, punctuation|
      mod.module_eval <<-end_eval, __FILE__, __LINE__ + 1
        def #{aliased_target}_with_potential_path_arguments#{punctuation}(#{first_args}*files, &block)
          files = files.map{|f| Backports.convert_path(f) }
          #{aliased_target}_without_potential_path_arguments#{punctuation}(#{first_args}*files, &block)
        end
      end_eval
    end
  end

  # Coerce +path+ the way core IO/File methods do: IO objects pass through
  # (via to_io); otherwise prefer to_path, then to_str, else return as-is.
  def self.convert_path(path)
    try_convert(path, IO, :to_io) ||
      begin
        path = path.to_path if path.respond_to?(:to_path)
        try_convert(path, String, :to_str) || path
      end
  end
end
|
@ -0,0 +1,13 @@
|
||||
module Backports
  # Requires, in sorted order, every .rb file in the directory named after
  # the calling file (".../backports/2.4/array.rb" loads "backports/2.4/array/*.rb").
  def self.require_relative_dir
    # Caller's file path without the ".rb:<line>" suffix = the sibling dir.
    dir = caller.first.split(/\.rb:\d/,2).first
    # Reduce to the load-path-relative "backports/..." prefix for require.
    short_path = dir[/.*(backports\/.*)/, 1] << '/'
    Dir.entries(dir).
      map{|f| Regexp.last_match(1) if /^(.*)\.rb$/ =~ f}.
      compact.
      sort.
      each do |f|
        require short_path + f
      end
  end
end
|
@ -0,0 +1,59 @@
|
||||
module Backports
  module StdLib
    # Answers "has this stdlib feature been loaded?", abstracting over the
    # 1.8 vs 1.9 representations of $LOADED_FEATURES.
    class LoadedFeatures
      if RUBY_VERSION >= "1.9"
        # Full paths are recorded in $LOADED_FEATURES.
        # Features we merely pretended to load (no real file present).
        @@our_loads = {}
        # Check loaded features for one that matches "#{any of the load path}/#{feature}"
        def include?(feature)
          return true if @@our_loads[feature]
          # Assume backported features are Ruby libraries (i.e. not C)
          @loaded ||= $LOADED_FEATURES.group_by{|p| File.basename(p, ".rb")}
          if fullpaths = @loaded[File.basename(feature, ".rb")]
            fullpaths.any?{|fullpath|
              base_dir, = fullpath.partition("/#{feature}")
              $LOAD_PATH.include?(base_dir)
            }
          end
        end

        def self.mark_as_loaded(feature)
          @@our_loads[feature] = true
          # Nothing to do, the full path will be OK
        end

      else
        # Requested features are recorded in $LOADED_FEATURES
        def include?(feature)
          # Assume backported features are Ruby libraries (i.e. not C)
          $LOADED_FEATURES.include?("#{File.basename(feature, '.rb')}.rb")
        end

        def self.mark_as_loaded(feature)
          $LOADED_FEATURES << "#{File.basename(feature, '.rb')}.rb"
        end
      end
    end

    class << self
      # Maps a stdlib feature name to the backport files extending it;
      # populated by extend_relative, consumed by require_with_backports.
      attr_accessor :extended_lib

      # Registers every backport file under +relative_dir+ (next to the
      # caller): requires it immediately when the feature is already loaded,
      # otherwise records it for the require hook to pick up later.
      def extend_relative relative_dir="stdlib"
        loaded = Backports::StdLib::LoadedFeatures.new
        dir = File.expand_path(relative_dir, File.dirname(caller.first.split(/:\d/,2).first))
        Dir.entries(dir).
          map{|f| Regexp.last_match(1) if /^(.*)\.rb$/ =~ f}.
          compact.
          each do |f|
            path = File.expand_path(f, dir)
            if loaded.include?(f)
              require path
            else
              @extended_lib[f] << path
            end
          end
      end
    end
    # ||= so repeated loads of this file keep existing registrations.
    self.extended_lib ||= Hash.new{|h, k| h[k] = []}
  end
end
|
@ -0,0 +1,9 @@
|
||||
module Backports
  # Runs the block with $VERBOSE lowered from true to false (i.e. from full
  # to default warnings); nil (warnings disabled) is left untouched.
  # $VERBOSE is always restored afterwards; returns the block's value.
  def self.suppress_verbose_warnings
    saved = $VERBOSE
    # Only downgrade a truthy setting; false/nil stay as they are.
    $VERBOSE &&= false
    yield
  ensure
    $VERBOSE = saved
  end
end
|
@ -0,0 +1,908 @@
|
||||
module ThreadSafe
|
||||
# A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59
|
||||
# available in public domain.
|
||||
#
|
||||
# Original source code available here:
|
||||
# http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59
|
||||
#
|
||||
# The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose
|
||||
# size exceeds a threshold).
|
||||
#
|
||||
# A hash table supporting full concurrency of retrievals and high expected
|
||||
# concurrency for updates. However, even though all operations are
|
||||
# thread-safe, retrieval operations do _not_ entail locking, and there is
|
||||
# _not_ any support for locking the entire table in a way that prevents all
|
||||
# access.
|
||||
#
|
||||
# Retrieval operations generally do not block, so may overlap with update
|
||||
# operations. Retrievals reflect the results of the most recently _completed_
|
||||
# update operations holding upon their onset. (More formally, an update
|
||||
# operation for a given key bears a _happens-before_ relation with any (non
|
||||
# +nil+) retrieval for that key reporting the updated value.) For aggregate
|
||||
# operations such as +clear()+, concurrent retrievals may reflect insertion or
|
||||
# removal of only some entries. Similarly, the +each_pair+ iterator yields
|
||||
# elements reflecting the state of the hash table at some point at or since
|
||||
# the start of the +each_pair+. Bear in mind that the results of aggregate
|
||||
# status methods including +size()+ and +empty?+} are typically useful only
|
||||
# when a map is not undergoing concurrent updates in other threads. Otherwise
|
||||
# the results of these methods reflect transient states that may be adequate
|
||||
# for monitoring or estimation purposes, but not for program control.
|
||||
#
|
||||
# The table is dynamically expanded when there are too many collisions (i.e.,
|
||||
# keys that have distinct hash codes but fall into the same slot modulo the
|
||||
# table size), with the expected average effect of maintaining roughly two
|
||||
# bins per mapping (corresponding to a 0.75 load factor threshold for
|
||||
# resizing). There may be much variance around this average as mappings are
|
||||
# added and removed, but overall, this maintains a commonly accepted
|
||||
# time/space tradeoff for hash tables. However, resizing this or any other
|
||||
# kind of hash table may be a relatively slow operation. When possible, it is
|
||||
# a good idea to provide a size estimate as an optional :initial_capacity
|
||||
# initializer argument. An additional optional :load_factor constructor
|
||||
# argument provides a further means of customizing initial table capacity by
|
||||
# specifying the table density to be used in calculating the amount of space
|
||||
# to allocate for the given number of elements. Note that using many keys with
|
||||
# exactly the same +hash+ is a sure way to slow down performance of any hash
|
||||
# table.
|
||||
#
|
||||
# ## Design overview
|
||||
#
|
||||
# The primary design goal of this hash table is to maintain concurrent
|
||||
# readability (typically method +[]+, but also iteration and related methods)
|
||||
# while minimizing update contention. Secondary goals are to keep space
|
||||
# consumption about the same or better than plain +Hash+, and to support high
|
||||
# initial insertion rates on an empty table by many threads.
|
||||
#
|
||||
# Each key-value mapping is held in a +Node+. The validation-based approach
|
||||
# explained below leads to a lot of code sprawl because retry-control
|
||||
# precludes factoring into smaller methods.
|
||||
#
|
||||
# The table is lazily initialized to a power-of-two size upon the first
|
||||
# insertion. Each bin in the table normally contains a list of +Node+s (most
|
||||
# often, the list has only zero or one +Node+). Table accesses require
|
||||
# volatile/atomic reads, writes, and CASes. The lists of nodes within bins are
|
||||
# always accurately traversable under volatile reads, so long as lookups check
|
||||
# hash code and non-nullness of value before checking key equality.
|
||||
#
|
||||
# We use the top two bits of +Node+ hash fields for control purposes -- they
|
||||
# are available anyway because of addressing constraints. As explained further
|
||||
# below, these top bits are used as follows:
|
||||
#
|
||||
# - 00 - Normal
|
||||
# - 01 - Locked
|
||||
# - 11 - Locked and may have a thread waiting for lock
|
||||
# - 10 - +Node+ is a forwarding node
|
||||
#
|
||||
# The lower 28 bits of each +Node+'s hash field contain a the key's hash code,
|
||||
# except for forwarding nodes, for which the lower bits are zero (and so
|
||||
# always have hash field == +MOVED+).
|
||||
#
|
||||
# Insertion (via +[]=+ or its variants) of the first node in an empty bin is
|
||||
# performed by just CASing it to the bin. This is by far the most common case
|
||||
# for put operations under most key/hash distributions. Other update
|
||||
# operations (insert, delete, and replace) require locks. We do not want to
|
||||
# waste the space required to associate a distinct lock object with each bin,
|
||||
# so instead use the first node of a bin list itself as a lock. Blocking
|
||||
# support for these locks relies +Util::CheapLockable. However, we also need a
|
||||
# +try_lock+ construction, so we overlay these by using bits of the +Node+
|
||||
# hash field for lock control (see above), and so normally use builtin
|
||||
# monitors only for blocking and signalling using
|
||||
# +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+.
|
||||
#
|
||||
# Using the first node of a list as a lock does not by itself suffice though:
|
||||
# When a node is locked, any update must first validate that it is still the
|
||||
# first node after locking it, and retry if not. Because new nodes are always
|
||||
# appended to lists, once a node is first in a bin, it remains first until
|
||||
# deleted or the bin becomes invalidated (upon resizing). However, operations
|
||||
# that only conditionally update may inspect nodes until the point of update.
|
||||
# This is a converse of sorts to the lazy locking technique described by
|
||||
# Herlihy & Shavit.
|
||||
#
|
||||
# The main disadvantage of per-bin locks is that other update operations on
|
||||
# other nodes in a bin list protected by the same lock can stall, for example
|
||||
# when user +eql?+ or mapping functions take a long time. However,
|
||||
# statistically, under random hash codes, this is not a common problem.
|
||||
# Ideally, the frequency of nodes in bins follows a Poisson distribution
|
||||
# (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of
|
||||
# about 0.5 on average, given the resizing threshold of 0.75, although with a
|
||||
# large variance because of resizing granularity. Ignoring variance, the
|
||||
# expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
|
||||
# factorial(k)). The first values are:
|
||||
#
|
||||
# - 0: 0.60653066
|
||||
# - 1: 0.30326533
|
||||
# - 2: 0.07581633
|
||||
# - 3: 0.01263606
|
||||
# - 4: 0.00157952
|
||||
# - 5: 0.00015795
|
||||
# - 6: 0.00001316
|
||||
# - 7: 0.00000094
|
||||
# - 8: 0.00000006
|
||||
# - more: less than 1 in ten million
|
||||
#
|
||||
# Lock contention probability for two threads accessing distinct elements is
|
||||
# roughly 1 / (8 * #elements) under random hashes.
|
||||
#
|
||||
# The table is resized when occupancy exceeds a percentage threshold
|
||||
# (nominally, 0.75, but see below). Only a single thread performs the resize
|
||||
# (using field +size_control+, to arrange exclusion), but the table otherwise
|
||||
# remains usable for reads and updates. Resizing proceeds by transferring
|
||||
# bins, one by one, from the table to the next table. Because we are using
|
||||
# power-of-two expansion, the elements from each bin must either stay at same
|
||||
# index, or move with a power of two offset. We eliminate unnecessary node
|
||||
# creation by catching cases where old nodes can be reused because their next
|
||||
# fields won't change. On average, only about one-sixth of them need cloning
|
||||
# when a table doubles. The nodes they replace will be garbage collectable as
|
||||
# soon as they are no longer referenced by any reader thread that may be in
|
||||
# the midst of concurrently traversing table. Upon transfer, the old table bin
|
||||
# contains only a special forwarding node (with hash field +MOVED+) that
|
||||
# contains the next table as its key. On encountering a forwarding node,
|
||||
# access and update operations restart, using the new table.
|
||||
#
|
||||
# Each bin transfer requires its bin lock. However, unlike other cases, a
|
||||
# transfer can skip a bin if it fails to acquire its lock, and revisit it
|
||||
# later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that
|
||||
# have been skipped because of failure to acquire a lock, and blocks only if
|
||||
# none are available (i.e., only very rarely). The transfer operation must
|
||||
# also ensure that all accessible bins in both the old and new table are
|
||||
# usable by any traversal. When there are no lock acquisition failures, this
|
||||
# is arranged simply by proceeding from the last bin (+table.size - 1+) up
|
||||
# towards the first. Upon seeing a forwarding node, traversals arrange to move
|
||||
# to the new table without revisiting nodes. However, when any node is skipped
|
||||
# during a transfer, all earlier table bins may have become visible, so are
|
||||
# initialized with a reverse-forwarding node back to the old table until the
|
||||
# new ones are established. (This sometimes requires transiently locking a
|
||||
# forwarding node, which is possible under the above encoding.) These more
|
||||
# expensive mechanics trigger only when necessary.
|
||||
#
|
||||
# The traversal scheme also applies to partial traversals of
|
||||
# ranges of bins (via an alternate Traverser constructor)
|
||||
# to support partitioned aggregate operations. Also, read-only
|
||||
# operations give up if ever forwarded to a null table, which
|
||||
# provides support for shutdown-style clearing, which is also not
|
||||
# currently implemented.
|
||||
#
|
||||
# Lazy table initialization minimizes footprint until first use.
|
||||
#
|
||||
# The element count is maintained using a +ThreadSafe::Util::Adder+,
|
||||
# which avoids contention on updates but can encounter cache thrashing
|
||||
# if read too frequently during concurrent access. To avoid reading so
|
||||
# often, resizing is attempted either when a bin lock is
|
||||
# contended, or upon adding to a bin already holding two or more
|
||||
# nodes (checked before adding in the +x_if_absent+ methods, after
|
||||
# adding in others). Under uniform hash distributions, the
|
||||
# probability of this occurring at threshold is around 13%,
|
||||
# meaning that only about 1 in 8 puts check threshold (and after
|
||||
# resizing, many fewer do so). But this approximation has high
|
||||
# variance for small table sizes, so we check on any collision
|
||||
# for sizes <= 64. The bulk putAll operation further reduces
|
||||
# contention by only committing count updates upon these size
|
||||
# checks.
|
||||
class AtomicReferenceCacheBackend
|
||||
# The bin array. Power-of-two sizing and volatile/CAS element access are
# inherited from Util::PowerOfTwoTuple.
class Table < Util::PowerOfTwoTuple
  # Atomically installs a brand-new Node at bin +i+, succeeding only if
  # the bin is still empty (nil). Returns truthy on success.
  def cas_new_node(i, hash, key, value)
    cas(i, nil, Node.new(hash, key, value))
  end

  # Reserves empty bin +i+ with a pre-locked placeholder node, then
  # computes the value by yielding NULL. If the block returns NULL the
  # reservation is rolled back (bin reset to nil); otherwise the computed
  # value is published on the node. Returns [succeeded, new_value].
  def try_to_cas_in_computed(i, hash, key)
    succeeded = false
    new_value = nil
    new_node = Node.new(locked_hash = hash | LOCKED, key, NULL)
    if cas(i, nil, new_node)
      begin
        if NULL == (new_value = yield(NULL))
          was_null = true
        else
          new_node.value = new_value
        end
        succeeded = true
      ensure
        # Undo the reservation if the block raised or produced NULL, then
        # drop the LOCKED bit (which may wake waiting threads).
        volatile_set(i, nil) if !succeeded || was_null
        new_node.unlock_via_hash(locked_hash, hash)
      end
    end
    return succeeded, new_value
  end

  # Locks +node+ via its hash bits and yields only while it is still the
  # first node of bin +i+ (guards against concurrent removal/resizing).
  def try_lock_via_hash(i, node, node_hash)
    node.try_lock_via_hash(node_hash) do
      yield if volatile_get(i) == node
    end
  end

  # Unlinks +node+ from bin +i+'s list; when it is the bin head (no
  # predecessor) the bin pointer itself is swung to node.next.
  def delete_node_at(i, node, predecessor_node)
    if predecessor_node
      predecessor_node.next = node.next
    else
      volatile_set(i, node.next)
    end
  end
end
|
||||
|
||||
# Key-value entry. Nodes with a hash field of +MOVED+ are special, and do
# not contain user keys or values. Otherwise, keys are never +nil+, and
# +NULL+ +value+ fields indicate that a node is in the process of being
# deleted or created. For purposes of read-only access, a key may be read
# before a value, but can only be used after checking value to be +!= NULL+.
class Node
  extend Util::Volatile
  attr_volatile :hash, :value, :next

  include Util::CheapLockable

  bit_shift = Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves
  # Encodings for special uses of Node hash fields. See above for explanation.
  MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes
  LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit
  WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together
  HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash

  # Spinning before blocking only pays off with more than one CPU.
  SPIN_LOCK_ATTEMPTS = Util::CPU_COUNT > 1 ? Util::CPU_COUNT * 2 : 0

  attr_reader :key

  def initialize(hash, key, value, next_node = nil)
    super()
    @key = key
    # lazy (non-volatile) sets are safe here: the node is not yet published.
    self.lazy_set_hash(hash)
    self.lazy_set_value(value)
    self.next = next_node
  end

  # Spins a while if +LOCKED+ bit set and this node is the first of its bin,
  # and then sets +WAITING+ bits on hash field and blocks (once) if they are
  # still set. It is OK for this method to return even if lock is not
  # available upon exit, which enables these simple single-wait mechanics.
  #
  # The corresponding signalling operation is performed within callers: Upon
  # detecting that +WAITING+ has been set when unlocking lock (via a failed
  # CAS from non-waiting +LOCKED+ state), unlockers acquire the
  # +cheap_synchronize+ lock and perform a +cheap_broadcast+.
  def try_await_lock(table, i)
    if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking?
      spins = SPIN_LOCK_ATTEMPTS
      randomizer = base_randomizer = Util::XorShiftRandom.get
      # Keep trying only while this node is still the bin head and locked.
      while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash)
        if spins >= 0
          if (randomizer = (randomizer >> 1)).even? # spin at random
            if (spins -= 1) == 0
              Thread.pass # yield before blocking
            else
              randomizer = base_randomizer = Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero?
            end
          end
        elsif cas_hash(my_hash, my_hash | WAITING)
          # Spins exhausted: advertise a waiter and block once.
          force_aquire_lock(table, i)
          break
        end
      end
    end
  end

  # True when this node's key equals +key+ (via +eql?+, Hash-style equality).
  def key?(key)
    @key.eql?(key)
  end

  # True when both the usable hash bits and the key match.
  def matches?(key, hash)
    pure_hash == hash && key?(key)
  end

  # The stored hash with the two control bits masked off.
  def pure_hash
    hash & HASH_BITS
  end

  # CASes the LOCKED bit into the hash field; on success yields (holding
  # the lock) and always releases via unlock_via_hash. Returns nil/false
  # when the lock could not be acquired.
  def try_lock_via_hash(node_hash = hash)
    if cas_hash(node_hash, locked_hash = node_hash | LOCKED)
      begin
        yield
      ensure
        unlock_via_hash(locked_hash, node_hash)
      end
    end
  end

  def locked?
    self.class.locked_hash?(hash)
  end

  # Clears the lock bits. If the plain CAS fails, a waiter must have set
  # WAITING meanwhile: store the hash directly and broadcast to wake them.
  def unlock_via_hash(locked_hash, node_hash)
    unless cas_hash(locked_hash, node_hash)
      self.hash = node_hash
      cheap_synchronize { cheap_broadcast }
    end
  end

  private
  # Blocks (once) while this node is still the head of bin +i+ and the
  # WAITING bits remain set; otherwise broadcasts, since we may have won a
  # race against the signaller. (NOTE(review): "aquire" spelling is the
  # vendored gem's original method name.)
  def force_aquire_lock(table, i)
    cheap_synchronize do
      if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING
        cheap_wait
      else
        cheap_broadcast # possibly won race vs signaller
      end
    end
  end

  class << self
    # True when the LOCKED bit is set in +hash+.
    def locked_hash?(hash)
      (hash & LOCKED) != 0
    end
  end
end
|
||||
|
||||
# shorthands
MOVED = Node::MOVED
LOCKED = Node::LOCKED
WAITING = Node::WAITING
HASH_BITS = Node::HASH_BITS

# Sentinel held in +size_control+ while one thread initializes/resizes.
NOW_RESIZING = -1
DEFAULT_CAPACITY = 16
MAX_CAPACITY = Util::MAX_INT

# The buffer size for skipped bins during transfers. The
# value is arbitrary but should be large enough to avoid
# most locking stalls during resizes.
TRANSFER_BUFFER_SIZE = 32

extend Util::Volatile
attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two.

  # Table initialization and resizing control. When negative, the
  # table is being initialized or resized. Otherwise, when table is
  # null, holds the initial table size to use upon creation, or 0
  # for default. After initialization, holds the next element count
  # value upon which to resize the table.
  :size_control
|
||||
|
||||
# Sets up an empty map. +options+ may carry :initial_capacity; the actual
# capacity is rounded up to a power of two and capped at MAX_CAPACITY.
# Element counting is delegated to a striped Util::Adder.
def initialize(options = nil)
  super()
  @counter = Util::Adder.new
  requested = (options && options[:initial_capacity]) || DEFAULT_CAPACITY
  capacity = table_size_for(requested)
  capacity = MAX_CAPACITY if capacity > MAX_CAPACITY
  self.size_control = capacity
end
|
||||
|
||||
# Fetches the value for +key+, or +else_value+ when absent. Lock-free
# read: walks the bin's node list. A MOVED node forwards to the next
# table — the inner +while+ breaks with +node.key+ (the new table), which
# the outer loop adopts as +current_table+; plain list exhaustion breaks
# with nil, ending the outer loop and falling through to +else_value+.
def get_or_default(key, else_value = nil)
  hash = key_hash(key)
  current_table = table
  while current_table
    node = current_table.volatile_get_by_hash(hash)
    current_table =
      while node
        if (node_hash = node.hash) == MOVED
          break node.key # forwarding node stores the next table as its key
        elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value)
          return value
        end
        node = node.next
      end
  end
  else_value
end
|
||||
|
||||
# Hash-like read; returns nil for missing keys (see #get_or_default).
def [](key)
  get_or_default(key)
end
|
||||
|
||||
# Membership test; uses the NULL sentinel as default so a stored nil is
# distinguished from absence.
def key?(key)
  get_or_default(key, NULL) != NULL
end
|
||||
|
||||
# Hash-like write; returns the assigned +value+ (the previous value that
# get_and_set returns is discarded here).
def []=(key, value)
  get_and_set(key, value)
  value
end
|
||||
|
||||
# Returns the existing value for +key+, or computes, stores and returns
# the block's value. Standard CHMv8 retry loop: empty bin -> CAS in a
# computed node; MOVED -> follow the forwarder to the new table; value
# found without locking -> return it; bin locked -> wait; otherwise lock
# the bin and do the slow-path insert.
def compute_if_absent(key)
  hash = key_hash(key)
  current_table = table || initialize_table
  while true
    if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
      succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield }
      if succeeded
        increment_size
        return new_value
      end
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key
    elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS))
      return current_value
    elsif Node.locked_hash?(node_hash)
      try_await_lock(current_table, i, node)
    else
      succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield }
      return value if succeeded
    end
  end
end
|
||||
|
||||
# Recomputes the value for +key+ only when a mapping exists. The block
# receives the current value; a nil result deletes the mapping (NULL is
# the internal deletion marker). Returns the block's last result, or nil
# when the key was absent.
def compute_if_present(key)
  computed = nil
  internal_replace(key) do |old_value|
    exposed = (NULL == old_value) ? nil : old_value
    computed = yield(exposed)
    computed.nil? ? NULL : computed
  end
  computed
end
|
||||
|
||||
# Unconditionally recomputes the value for +key+: the block receives the
# current value (nil when absent) and its result is stored; returning nil
# removes the mapping. Returns the stored value (or nil).
def compute(key)
  internal_compute(key) do |old_value|
    exposed = (NULL == old_value) ? nil : old_value
    result = yield(exposed)
    result.nil? ? NULL : result
  end
end
|
||||
|
||||
# Hash#merge-style single-pair merge: when +key+ is absent the given
# +value+ is stored as-is (the block is NOT called, thanks to the
# short-circuiting ||); when present, the block decides — a nil result
# deletes the entry (NULL), any other result replaces it. Note that the
# condition reassigns +value+ to the block's result as a side effect.
def merge_pair(key, value)
  internal_compute(key) do |old_value|
    if NULL == old_value || !(value = yield(old_value)).nil?
      value
    else
      NULL
    end
  end
end
|
||||
|
||||
# Atomically replaces +old_value+ with +new_value+ for +key+; true when
# the expected old value was present and got replaced.
def replace_pair(key, old_value, new_value)
  NULL != internal_replace(key, old_value) { new_value }
end
|
||||
|
||||
# Stores +new_value+ only when +key+ already has a mapping. Returns the
# previous value, or nil when the key was absent (NULL, nil and false
# previous results all map to nil).
def replace_if_exists(key, new_value)
  previous = internal_replace(key) { new_value }
  previous if previous && NULL != previous
end
|
||||
|
||||
# Stores +value+ for +key+ and returns the previous value (nil when the
# key was absent — the empty-bin CAS branch breaks with nil). Same retry
# loop shape as compute_if_absent.
def get_and_set(key, value) # internalPut in the original CHMV8
  hash = key_hash(key)
  current_table = table || initialize_table
  while true
    if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
      if current_table.cas_new_node(i, hash, key, value)
        increment_size
        break
      end
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key
    elsif Node.locked_hash?(node_hash)
      try_await_lock(current_table, i, node)
    else
      succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
      break old_value if succeeded
    end
  end
end
|
||||
|
||||
# Removes +key+, returning its former value or nil. Implemented as a
# replace with NULL, which the replace machinery treats as deletion.
def delete(key)
  replace_if_exists(key, NULL)
end
|
||||
|
||||
# Removes +key+ only when it currently maps to +value+. Returns a strict
# boolean: true when an entry was removed, false otherwise.
def delete_pair(key, value)
  removed = internal_replace(key, value) { NULL }
  !!(removed && NULL != removed)
end
|
||||
|
||||
# Yields each live key/value pair. Bin indexes are visited bottom-up;
# when a forwarding node is met the walk moves to the larger table and
# visits index +i+ followed by its "upper" sibling +i + base_size+, so
# every entry is seen despite a concurrent resize. Nodes whose value is
# NULL (deleted/special) are skipped. Returns self.
def each_pair
  return self unless current_table = table
  current_table_size = base_size = current_table.size
  i = base_index = 0
  while base_index < base_size
    if node = current_table.volatile_get(i)
      if node.hash == MOVED
        current_table = node.key
        current_table_size = current_table.size
      else
        begin
          if NULL != (value = node.value) # skip deleted or special nodes
            yield node.key, value
          end
        end while node = node.next
      end
    end

    if (i_with_base = i + base_size) < current_table_size
      i = i_with_base # visit upper slots if present
    else
      i = base_index += 1
    end
  end
  self
end
|
||||
|
||||
# Approximate element count; transiently negative counter sums (possible
# under concurrent updates) are clamped to 0.
def size
  total = @counter.sum
  if total < 0
    0
  else
    total
  end
end
|
||||
|
||||
# True when the (approximate) size is zero.
def empty?
  size.zero?
end
|
||||
|
||||
# Implementation for clear. Steps through each bin, removing all nodes.
# Follows forwarding nodes into a resizing table; waits on locked bins,
# flushing the pending deleted_count into the size counter first so
# concurrent readers see progress. Returns self.
def clear
  return self unless current_table = table
  current_table_size = current_table.size
  deleted_count = i = 0
  while i < current_table_size
    if !(node = current_table.volatile_get(i))
      i += 1
    elsif (node_hash = node.hash) == MOVED
      # forwarding node: continue clearing in the new table
      current_table = node.key
      current_table_size = current_table.size
    elsif Node.locked_hash?(node_hash)
      decrement_size(deleted_count) # opportunistically update count
      deleted_count = 0
      node.try_await_lock(current_table, i)
    else
      current_table.try_lock_via_hash(i, node, node_hash) do
        begin
          deleted_count += 1 if NULL != node.value # recheck under lock
          node.value = nil
        end while node = node.next
        current_table.volatile_set(i, nil)
        i += 1
      end
    end
  end
  decrement_size(deleted_count)
  self
end
|
||||
|
||||
private
# Internal versions of the insertion methods, each a
# little more complicated than the last. All have
# the same basic structure:
# 1. If table uninitialized, create
# 2. If bin empty, try to CAS new node
# 3. If bin stale, use new table
# 4. Lock and validate; if valid, scan and add or update
#
# The others interweave other checks and/or alternative actions:
# * Plain +get_and_set+ checks for and performs resize after insertion.
# * compute_if_absent prescans for mapping without lock (and fails to add
#   if present), which also makes pre-emptive resize checks worthwhile.
#
# Someday when details settle down a bit more, it might be worth
# some factoring to reduce sprawl.

# Replaces/deletes the value for +key+ (the block's result is stored;
# NULL deletes). +expected_old_value+ of NULL matches any current value.
# Returns the old value, or NULL when no mapping was present.
def internal_replace(key, expected_old_value = NULL, &block)
  hash = key_hash(key)
  current_table = table
  while current_table
    if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
      break
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key
    elsif (node_hash & HASH_BITS) != hash && !node.next # precheck
      break # rules out possible existence
    elsif Node.locked_hash?(node_hash)
      try_await_lock(current_table, i, node)
    else
      succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block)
      return old_value if succeeded
    end
  end
  NULL
end
|
||||
|
||||
# Locked slow path of internal_replace: under the bin lock, walks the
# list for +key+; when found (and +expected_old_value+ matches — NULL
# means "any value"), stores the block's result, unlinking the node and
# decrementing the count when that result is NULL. Returns
# [true, old_value] when the lock was held; falsy when the lock was lost
# and the caller must retry.
def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash)
  current_table.try_lock_via_hash(i, node, node_hash) do
    predecessor_node = nil
    old_value = NULL
    begin
      if node.matches?(key, hash) && NULL != (current_value = node.value)
        if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value
          old_value = current_value
          if NULL == (node.value = yield(old_value))
            current_table.delete_node_at(i, node, predecessor_node)
            decrement_size
          end
        end
        break
      end

      predecessor_node = node
    end while node = node.next

    return true, old_value
  end
end
|
||||
|
||||
# Walks a bin's node list looking for +key+; returns its value, or NULL
# when not found. Seeing a second node means the bin is getting crowded,
# so a resize check runs on the way out — the +ensure+ fires even on the
# early +return value+.
def find_value_in_node_list(node, key, hash, pure_hash)
  do_check_for_resize = false
  while true
    if pure_hash == hash && node.key?(key) && NULL != (value = node.value)
      return value
    elsif node = node.next
      do_check_for_resize = true # at least 2 nodes -> check for resize
      pure_hash = node.pure_hash
    else
      return NULL
    end
  end
ensure
  check_for_resize if do_check_for_resize
end
|
||||
|
||||
# Shared engine for compute-style updates: the block receives the current
# value (or NULL when absent) and its result is stored; a NULL result
# means "no mapping" (nothing inserted / entry removed). Returns the
# stored value, or nil when the result was NULL. Same retry loop shape as
# the other insertion methods.
def internal_compute(key, &block)
  hash = key_hash(key)
  current_table = table || initialize_table
  while true
    if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
      succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block)
      if succeeded
        if NULL == new_value
          break nil
        else
          increment_size
          break new_value
        end
      end
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key
    elsif Node.locked_hash?(node_hash)
      try_await_lock(current_table, i, node)
    else
      succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block)
      break new_value if succeeded
    end
  end
end
|
||||
|
||||
# Locked slow path of compute_if_absent: rescans the bin under the lock
# and either returns the now-present value or appends a freshly computed
# node (then checks for resize in the +ensure+). Returns
# [true, value] when the lock was held; falsy means the lock was lost and
# the caller must retry.
def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash)
  added = false
  current_table.try_lock_via_hash(i, node, node_hash) do
    while true
      if node.matches?(key, hash) && NULL != (value = node.value)
        return true, value
      end
      last = node
      unless node = node.next
        last.next = Node.new(hash, key, value = yield)
        added = true
        increment_size
        return true, value
      end
    end
  end
ensure
  check_for_resize if added
end
|
||||
|
||||
# Locked slow path of internal_compute: under the bin lock, yields the
# existing value (or NULL at the end of the list) and stores the result —
# a NULL result deletes an existing node, or simply inserts nothing.
# Returns [true, value] when the lock was held; falsy means retry.
def attempt_compute(key, hash, current_table, i, node, node_hash)
  added = false
  current_table.try_lock_via_hash(i, node, node_hash) do
    predecessor_node = nil
    while true
      if node.matches?(key, hash) && NULL != (value = node.value)
        if NULL == (node.value = value = yield(value))
          current_table.delete_node_at(i, node, predecessor_node)
          decrement_size
          value = nil
        end
        return true, value
      end
      predecessor_node = node
      unless node = node.next
        if NULL == (value = yield(NULL))
          value = nil
        else
          predecessor_node.next = Node.new(hash, key, value)
          added = true
          increment_size
        end
        return true, value
      end
    end
  end
ensure
  check_for_resize if added
end
|
||||
|
||||
# Locked slow path of get_and_set: under the bin lock, replaces the value
# of an existing matching node or appends a new node at the end of the
# list. +node_nesting+ counts traversed nodes; chains longer than one —
# or any insert into a small (<= 64 bins) table — trigger a resize check
# in the +ensure+. Returns [true, old_value] or truthy on plain insert;
# falsy means the lock was lost and the caller must retry.
def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
  node_nesting = nil
  current_table.try_lock_via_hash(i, node, node_hash) do
    node_nesting = 1
    old_value = nil
    found_old_value = false
    while node
      if node.matches?(key, hash) && NULL != (old_value = node.value)
        found_old_value = true
        node.value = value
        break
      end
      last = node
      unless node = node.next
        last.next = Node.new(hash, key, value)
        break
      end
      node_nesting += 1
    end

    return true, old_value if found_old_value
    increment_size
    true
  end
ensure
  check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64)
end
|
||||
|
||||
# dup/clone hook: the copy starts with a fresh counter and no table; its
# size_control is seeded from the source's current table size so the
# lazily-created table comes up with a comparable capacity.
def initialize_copy(other)
  super
  @counter = Util::Adder.new
  self.table = nil
  source_table = other.table
  self.size_control = source_table ? source_table.size : DEFAULT_CAPACITY
  self
end
|
||||
|
||||
# Called when a bin is found locked: opportunistically helps with a
# resize first (a frequent cause of bin-lock contention), then blocks on
# the node's lock.
def try_await_lock(current_table, i, node)
  check_for_resize # try resizing if can't get lock
  node.try_await_lock(current_table, i)
end
|
||||
|
||||
# Maps a key to its bin hash: Ruby's #hash masked down to the usable
# (non-control) bits.
def key_hash(key)
  key.hash & HASH_BITS
end
|
||||
|
||||
# Returns a power of two table size for the given desired capacity:
# the smallest power of two (minimum 2) that is >= entry_count.
def table_size_for(entry_count)
  capacity = 2
  until capacity >= entry_count
    capacity <<= 1
  end
  capacity
end
|
||||
|
||||
# Initializes table, using the size recorded in +size_control+.
# Exactly one thread wins the resize "lock" and creates the table; losers
# spin (Thread.pass) until the table is published. The winning block's
# return value becomes the new size_control: the next resize threshold,
# i.e. 75% of the initial capacity.
def initialize_table
  until current_table ||= table
    if (size_ctrl = size_control) == NOW_RESIZING
      Thread.pass # lost initialization race; just spin
    else
      try_in_resize_lock(current_table, size_ctrl) do
        initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY
        current_table = self.table = Table.new(initial_size)
        initial_size - (initial_size >> 2) # 75% load factor
      end
    end
  end
  current_table
end
|
||||
|
||||
# If table is too small and not already resizing, creates next table and
# transfers bins. Rechecks occupancy after a transfer to see if another
# resize is already needed because resizings are lagging additions.
def check_for_resize
  # Resize while: a table exists, it can still grow, nobody else is
  # resizing, and the element count has passed the current threshold.
  while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum
    try_in_resize_lock(current_table, size_ctrl) do
      self.table = rebuild(current_table)
      (table_size << 1) - (table_size >> 1) # 75% load factor
    end
  end
end
|
||||
|
||||
# Runs the block as the (single) resizer: CASes +size_control+ to
# NOW_RESIZING to claim exclusivity, rechecks that the table is still the
# one we decided to resize, and always restores +size_control+ — to the
# block's result (the next resize threshold) on success, or the previous
# value otherwise.
def try_in_resize_lock(current_table, size_ctrl)
  if cas_size_control(size_ctrl, NOW_RESIZING)
    begin
      if current_table == table # recheck under lock
        size_ctrl = yield # get new size_control
      end
    ensure
      self.size_control = size_ctrl
    end
  end
end
|
||||
|
||||
# Moves and/or copies the nodes in each bin to new table. See above for explanation.
# Walks bins from the top index down (bin counts down; i tracks the bin
# currently being processed). Locked bins are buffered in locked_indexes
# and revisited in a second pass (bin == -1 marks that pass); +redo+
# re-processes the current bin after any failed CAS/lock attempt.
def rebuild(table)
  old_table_size = table.size
  new_table = table.next_in_size_table
  # puts "#{old_table_size} -> #{new_table.size}"
  forwarder = Node.new(MOVED, new_table, NULL)
  rev_forwarder = nil
  locked_indexes = nil # holds bins to revisit; nil until needed
  locked_arr_idx = 0
  bin = old_table_size - 1
  i = bin
  while true
    if !(node = table.volatile_get(i))
      # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table
      redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder))
    elsif Node.locked_hash?(node_hash = node.hash)
      # Bin is locked by another thread: defer it rather than block.
      locked_indexes ||= Array.new
      if bin < 0 && locked_arr_idx > 0
        locked_arr_idx -= 1
        i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin
        redo
      end
      if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE
        node.try_await_lock(table, i) # no other options -- block
        redo
      end
      rev_forwarder ||= Node.new(MOVED, table, NULL)
      redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list
      locked_indexes << i
      # Point both target slots back at the old table until this bin is
      # actually transferred, keeping traversals correct.
      new_table.volatile_set(i, rev_forwarder)
      new_table.volatile_set(i + old_table_size, rev_forwarder)
    else
      redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder)
    end

    if bin > 0
      i = (bin -= 1)
    elsif locked_indexes && !locked_indexes.empty?
      # Second pass: revisit bins that were locked during the main sweep.
      bin = -1
      i = locked_indexes.pop
      locked_arr_idx = locked_indexes.size - 1
    else
      return new_table
    end
  end
end
|
||||
|
||||
# During the deferred (bin < 0) pass of rebuild: claims an empty old bin
# by CAS-ing in a *locked* MOVED node, removes the reverse forwarders it
# previously published into the new table for this bin, installs the real
# forwarder in the old bin, then unlocks the node in place. Returns true
# on success, nil when the CAS on the old bin fails (caller retries).
def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder)
  # transiently use a locked forwarding node
  locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL)
  if old_table.cas(i, nil, locked_forwarder)
    new_table.volatile_set(i, nil) # kill the potential reverse forwarders
    new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders
    old_table.volatile_set(i, forwarder)
    locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED)
    true
  end
end
|
||||
|
||||
# Splits a normal bin with list headed by e into lo and hi parts; installs in given table.
# Takes the bin lock via try_lock_via_hash; only after the split lists are
# published into +new_table+ is the old bin replaced with +forwarder+.
# Returns the truthiness of try_lock_via_hash (falsy when the lock was
# not acquired, signalling the caller to retry).
def split_old_bin(table, new_table, i, node, node_hash, forwarder)
  table.try_lock_via_hash(i, node, node_hash) do
    split_bin(new_table, i, node, node_hash)
    table.volatile_set(i, forwarder)
  end
end
|
||||
|
||||
# Splits the chain headed by +node+ into a "low" list (hash bit clear,
# stays at index +i+) and a "high" list (hash bit set, moves to
# +i + bit+), and publishes both into +new_table+. The trailing run of
# nodes that all land on the same side (+last_run+ onward) is reused
# as-is rather than copied; everything before it is cloned.
def split_bin(new_table, i, node, node_hash)
  bit = new_table.size >> 1 # bit to split on
  run_bit = node_hash & bit
  last_run = nil
  low = nil
  high = nil
  current_node = node
  # this optimises for the lowest amount of volatile writes and objects created
  # First pass: find the start of the longest trailing run whose nodes
  # all share the same split-bit value.
  while current_node = current_node.next
    unless (b = current_node.hash & bit) == run_bit
      run_bit = b
      last_run = current_node
    end
  end
  # The trailing run becomes the seed of whichever list it belongs to.
  if run_bit == 0
    low = last_run
  else
    high = last_run
  end
  # Second pass: clone the nodes preceding the trailing run onto the
  # front of the appropriate list (order before +last_run+ is reversed,
  # which is fine for a hash bin).
  current_node = node
  until current_node == last_run
    pure_hash = current_node.pure_hash
    if (pure_hash & bit) == 0
      low = Node.new(pure_hash, current_node.key, current_node.value, low)
    else
      high = Node.new(pure_hash, current_node.key, current_node.value, high)
    end
    current_node = current_node.next
  end
  new_table.volatile_set(i, low)
  new_table.volatile_set(i + bit, high)
end
|
||||
|
||||
# Records one additional element by bumping the striped @counter.
def increment_size
  @counter.increment
end
|
||||
|
||||
# Records the removal of +by+ elements (1 by default) by adding the
# negated amount to the striped @counter.
def decrement_size(by = 1)
  @counter.add(-by)
end
|
||||
end
|
||||
end
|
@ -0,0 +1,77 @@
|
||||
module ThreadSafe
  # A thread-safe cache backend that funnels every operation of
  # NonConcurrentCacheBackend through a single Mutex_m lock.
  class SynchronizedCacheBackend < NonConcurrentCacheBackend
    require 'mutex_m'
    include Mutex_m
    # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are
    # not allowed to call each other.

    # Each public operation simply delegates to the superclass while
    # holding the lock; +super+ forwards the original arguments (and any
    # block) untouched.
    def [](key);                                 synchronize { super }; end
    def []=(key, value);                         synchronize { super }; end
    def compute_if_absent(key);                  synchronize { super }; end
    def compute_if_present(key);                 synchronize { super }; end
    def compute(key);                            synchronize { super }; end
    def merge_pair(key, value);                  synchronize { super }; end
    def replace_pair(key, old_value, new_value); synchronize { super }; end
    def replace_if_exists(key, new_value);       synchronize { super }; end
    def get_and_set(key, value);                 synchronize { super }; end
    def key?(key);                               synchronize { super }; end
    def value?(value);                           synchronize { super }; end
    def delete(key);                             synchronize { super }; end
    def delete_pair(key, value);                 synchronize { super }; end
    def clear;                                   synchronize { super }; end
    def size;                                    synchronize { super }; end
    def get_or_default(key, default_value);      synchronize { super }; end

    private

    def dupped_backend
      synchronize { super }
    end
  end
end
|
@ -0,0 +1,16 @@
|
||||
require 'etc'

module ThreadSafe
  # Shared constants and lazily-loaded helper classes used by the
  # concurrent collection implementations.
  module Util
    # Number of value bits in a tagged Fixnum on this VM: machine word
    # size minus one tag bit and one sign bit.
    FIXNUM_BIT_SIZE = (0.size * 8) - 2
    # Largest value representable as an immediate Fixnum.
    MAX_INT         = (2 ** FIXNUM_BIT_SIZE) - 1
    # Processor count, used to bound striping in Striped64/Adder.
    # Determined at load time via Etc.nprocessors (Ruby >= 2.2); falls
    # back to the historical hard-coded 16 on older runtimes.
    CPU_COUNT       = Etc.respond_to?(:nprocessors) ? Etc.nprocessors : 16

    # Helpers are autoloaded so that requiring 'thread_safe/util' stays cheap.
    autoload :AtomicReference, 'thread_safe/util/atomic_reference'
    autoload :Adder,           'thread_safe/util/adder'
    autoload :CheapLockable,   'thread_safe/util/cheap_lockable'
    autoload :PowerOfTwoTuple, 'thread_safe/util/power_of_two_tuple'
    autoload :Striped64,       'thread_safe/util/striped64'
    autoload :Volatile,        'thread_safe/util/volatile'
    autoload :VolatileTuple,   'thread_safe/util/volatile_tuple'
    autoload :XorShiftRandom,  'thread_safe/util/xor_shift_random'
  end
end
|
Loading…
x
Reference in New Issue
Block a user