Update Gems.

parent 9f458142b7
commit e41b3e8741
@@ -44,7 +44,7 @@ GEM
    method_source (1.0.0)
    mime-types (3.3.1)
      mime-types-data (~> 3.2015)
    mime-types-data (3.2020.0512)
    mime-types-data (3.2020.1104)
    mini_portile2 (2.4.0)
    minitest (5.14.2)
    mustache (1.1.1)
@@ -98,7 +98,7 @@ GEM
    rspec-support (3.10.0)
    rspec-wait (0.0.9)
      rspec (>= 3, < 4)
    rubocop (1.3.0)
    rubocop (1.3.1)
      parallel (~> 1.10)
      parser (>= 2.7.1.5)
      rainbow (>= 2.2.2, < 4.0)
@@ -109,8 +109,8 @@ GEM
      unicode-display_width (>= 1.4.0, < 2.0)
    rubocop-ast (1.1.1)
      parser (>= 2.7.1.5)
    rubocop-performance (1.8.1)
      rubocop (>= 0.87.0)
    rubocop-performance (1.9.0)
      rubocop (>= 0.90.0, < 2.0)
      rubocop-ast (>= 0.4.0)
    rubocop-rspec (2.0.0)
      rubocop (~> 1.0)
@@ -123,11 +123,11 @@ GEM
      docile (~> 1.1)
      simplecov-html (~> 0.11)
    simplecov-html (0.12.3)
    sorbet (0.5.6060)
      sorbet-static (= 0.5.6060)
    sorbet-runtime (0.5.6060)
    sorbet (0.5.6076)
      sorbet-static (= 0.5.6076)
    sorbet-runtime (0.5.6076)
    sorbet-runtime-stub (0.2.0)
    sorbet-static (0.5.6060-universal-darwin-14)
    sorbet-static (0.5.6076-universal-darwin-14)
    spoom (1.0.4)
      colorize
      sorbet (~> 0.5.5)
@@ -142,14 +142,14 @@ GEM
      thor (>= 0.19.2)
    thor (1.0.1)
    thread_safe (0.3.6)
    tzinfo (1.2.7)
    tzinfo (1.2.8)
      thread_safe (~> 0.1)
    unf (0.1.4)
      unf_ext
    unf_ext (0.0.7.7)
    unicode-display_width (1.7.0)
    webrobots (0.1.2)
    zeitwerk (2.4.0)
    zeitwerk (2.4.1)

PLATFORMS
  ruby
Library/Homebrew/vendor/bundle/bundler/setup.rb (vendored)
@@ -7,15 +7,15 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/concurrent-ruby-1.1.7
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/i18n-1.8.5/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/minitest-5.14.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thread_safe-0.3.6/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tzinfo-1.2.7/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/zeitwerk-2.4.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/tzinfo-1.2.8/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/zeitwerk-2.4.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/activesupport-6.0.3.4/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ast-2.4.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/bindata-2.4.8/lib"
$:.unshift "#{path}/"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-19/2.6.0/byebug-11.1.3"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-20/2.6.0/byebug-11.1.3"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/byebug-11.1.3/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-19/2.6.0/json-2.3.1"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-20/2.6.0/json-2.3.1"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/json-2.3.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/docile-1.3.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/simplecov-html-0.12.3/lib"
@@ -27,20 +27,20 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/highline-2.0.3/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/commander-4.5.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/connection_pool-2.2.3/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/diff-lcs-1.4.4/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-19/2.6.0/unf_ext-0.0.7.7"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-20/2.6.0/unf_ext-0.0.7.7"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unf_ext-0.0.7.7/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unf-0.1.4/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/domain_name-0.5.20190701/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/elftools-1.1.3/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-19/2.6.0/hpricot-0.8.6"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-20/2.6.0/hpricot-0.8.6"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/hpricot-0.8.6/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/http-cookie-1.0.3/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mime-types-data-3.2020.0512/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mime-types-data-3.2020.1104/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mime-types-3.3.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/net-http-digest_auth-1.4.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/net-http-persistent-4.0.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/mini_portile2-2.4.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-19/2.6.0/nokogiri-1.10.10"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-20/2.6.0/nokogiri-1.10.10"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/nokogiri-1.10.10/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ntlm-http-0.1.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/webrobots-0.1.2/lib"
@@ -51,12 +51,12 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel-1.20.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parallel_tests-3.3.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parser-2.7.2.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rainbow-3.0.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/sorbet-runtime-0.5.6040/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/sorbet-runtime-0.5.6076/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/parlour-4.0.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/patchelf-1.3.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/plist-3.5.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/pry-0.13.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-19/2.6.0/rdiscount-2.2.0.2"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/extensions/universal-darwin-20/2.6.0/rdiscount-2.2.0.2"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rdiscount-2.2.0.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/regexp_parser-1.8.2/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rexml-3.2.4/lib"
@@ -72,13 +72,13 @@ $:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rspec-wait-0.0.9/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-ast-1.1.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-progressbar-1.10.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/unicode-display_width-1.7.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-1.3.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-performance-1.8.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-1.3.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-performance-1.9.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-rspec-2.0.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/rubocop-sorbet-0.5.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/ruby-macho-2.5.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/sorbet-static-0.5.6042-universal-darwin-19/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/sorbet-0.5.6042/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/sorbet-static-0.5.6076-universal-darwin-20/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/sorbet-0.5.6076/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/sorbet-runtime-stub-0.2.0/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/thor-1.0.1/lib"
$:.unshift "#{path}/../#{ruby_engine}/#{ruby_version}/gems/spoom-1.0.4/lib"
@@ -1,134 +0,0 @@
# frozen_string_literal: true

module RuboCop
module Cop
module Performance
# This cop identifies places where custom code finding the sum of elements
# in some Enumerable object can be replaced by `Enumerable#sum` method.
#
# @example
#   # bad
#   [1, 2, 3].inject(:+)
#   [1, 2, 3].reduce(10, :+)
#   [1, 2, 3].inject(&:+)
#   [1, 2, 3].reduce { |acc, elem| acc + elem }
#
#   # good
#   [1, 2, 3].sum
#   [1, 2, 3].sum(10)
#   [1, 2, 3].sum
#
class Sum < Base
include RangeHelp
extend AutoCorrector

MSG = 'Use `%<good_method>s` instead of `%<bad_method>s`.'

def_node_matcher :sum_candidate?, <<~PATTERN
  (send _ ${:inject :reduce} $_init ? ${(sym :+) (block_pass (sym :+))})
PATTERN

def_node_matcher :sum_with_block_candidate?, <<~PATTERN
  (block
    $(send _ {:inject :reduce} $_init ?)
    (args (arg $_acc) (arg $_elem))
    $send)
PATTERN

def_node_matcher :acc_plus_elem?, <<~PATTERN
  (send (lvar %1) :+ (lvar %2))
PATTERN
alias elem_plus_acc? acc_plus_elem?

def on_send(node)
sum_candidate?(node) do |method, init, operation|
range = sum_method_range(node)
message = build_method_message(method, init, operation)

add_offense(range, message: message) do |corrector|
autocorrect(corrector, init, range)
end
end
end

def on_block(node)
sum_with_block_candidate?(node) do |send, init, var_acc, var_elem, body|
if acc_plus_elem?(body, var_acc, var_elem) || elem_plus_acc?(body, var_elem, var_acc)
range = sum_block_range(send, node)
message = build_block_message(send, init, var_acc, var_elem, body)

add_offense(range, message: message) do |corrector|
autocorrect(corrector, init, range)
end
end
end
end

private

def autocorrect(corrector, init, range)
return if init.empty?

replacement = build_good_method(init)

corrector.replace(range, replacement)
end

def sum_method_range(node)
range_between(node.loc.selector.begin_pos, node.loc.end.end_pos)
end

def sum_block_range(send, node)
range_between(send.loc.selector.begin_pos, node.loc.end.end_pos)
end

def build_method_message(method, init, operation)
good_method = build_good_method(init)
bad_method = build_method_bad_method(init, method, operation)
format(MSG, good_method: good_method, bad_method: bad_method)
end

def build_block_message(send, init, var_acc, var_elem, body)
good_method = build_good_method(init)
bad_method = build_block_bad_method(send.method_name, init, var_acc, var_elem, body)
format(MSG, good_method: good_method, bad_method: bad_method)
end

def build_good_method(init)
good_method = 'sum'

unless init.empty?
init = init.first
good_method += "(#{init.source})" unless init.int_type? && init.value.zero?
end
good_method
end

def build_method_bad_method(init, method, operation)
bad_method = "#{method}("
unless init.empty?
init = init.first
bad_method += "#{init.source}, "
end
bad_method += if operation.block_pass_type?
'&:+)'
else
':+)'
end
bad_method
end

def build_block_bad_method(method, init, var_acc, var_elem, body)
bad_method = method.to_s

unless init.empty?
init = init.first
bad_method += "(#{init.source})"
end
bad_method += " { |#{var_acc}, #{var_elem}| #{body.source} }"
bad_method
end
end
end
end
end
@@ -1,9 +0,0 @@
# frozen_string_literal: true

module RuboCop
module Performance
module Version
STRING = '1.8.1'
end
end
end
@@ -7,6 +7,11 @@ Performance/AncestorsInclude:
  Safe: false
  VersionAdded: '1.7'

Performance/ArraySemiInfiniteRangeSlice:
  Description: 'Identifies places where slicing arrays with semi-infinite ranges can be replaced by `Array#take` and `Array#drop`.'
  Enabled: pending
  VersionAdded: '1.9'

Performance/BigDecimalWithNumericArgument:
  Description: 'Convert numeric argument to string before passing to BigDecimal.'
  Enabled: 'pending'
@@ -17,11 +22,17 @@ Performance/BindCall:
  Enabled: true
  VersionAdded: '1.6'

Performance/BlockGivenWithExplicitBlock:
  Description: 'Check block argument explicitly instead of using `block_given?`.'
  Enabled: pending
  VersionAdded: '1.9'

Performance/Caller:
  Description: >-
    Use `caller(n..n)` instead of `caller`.
  Enabled: true
  VersionAdded: '0.49'
  VersionChanged: '1.9'

Performance/CaseWhenSplat:
  Description: >-
@@ -51,7 +62,7 @@ Performance/ChainArrayAllocation:

Performance/CollectionLiteralInLoop:
  Description: 'Extract Array and Hash literals outside of loops into local variables or constants.'
  Enabled: true
  Enabled: 'pending'
  VersionAdded: '1.8'
  # Min number of elements to consider an offense
  MinSize: 1
@@ -61,6 +72,11 @@ Performance/CompareWithBlock:
  Enabled: true
  VersionAdded: '0.46'

Performance/ConstantRegexp:
  Description: 'Finds regular expressions with dynamic components that are all constants.'
  Enabled: pending
  VersionAdded: '1.9'

Performance/Count:
  Description: >-
    Use `count` instead of `{select,find_all,filter,reject}...{size,count,length}`.
@@ -154,6 +170,12 @@ Performance/IoReadlines:
  Enabled: false
  VersionAdded: '1.7'

Performance/MethodObjectAsBlock:
  Description: 'Use block explicitly instead of block-passing a method object.'
  Reference: 'https://github.com/JuanitoFatas/fast-ruby#normal-way-to-apply-method-vs-method-code'
  Enabled: pending
  VersionAdded: '1.9'

Performance/OpenStruct:
  Description: 'Use `Struct` instead of `OpenStruct`.'
  Enabled: false
@@ -283,7 +305,9 @@ Performance/TimesMap:
Performance/UnfreezeString:
  Description: 'Use unary plus to get an unfrozen string literal.'
  Enabled: true
  SafeAutoCorrect: false
  VersionAdded: '0.50'
  VersionChanged: '1.9'

Performance/UriDefaultParser:
  Description: 'Use `URI::DEFAULT_PARSER` instead of `URI::Parser.new`.'
@@ -25,7 +25,7 @@ module RuboCop
# (tricky: \s, \d, and so on are metacharacters, but other characters
# escaped with a slash are just literals. LITERAL_REGEX takes all
# that into account.)
/\A\\A(?:#{Util::LITERAL_REGEX})+\z/.match?(regex_str)
/\A\\A(?:#{Util::LITERAL_REGEX})+\z/o.match?(regex_str)
end

def literal_at_start_with_caret?(regex_str)
@@ -35,21 +35,21 @@ module RuboCop
# (tricky: \s, \d, and so on are metacharacters, but other characters
# escaped with a slash are just literals. LITERAL_REGEX takes all
# that into account.)
/\A\^(?:#{Util::LITERAL_REGEX})+\z/.match?(regex_str)
/\A\^(?:#{Util::LITERAL_REGEX})+\z/o.match?(regex_str)
end

def literal_at_end_with_backslash_z?(regex_str)
# is this regexp 'literal' in the sense of only matching literal
# chars, rather than using metachars like . and * and so on?
# also, is it anchored at the end of the string?
/\A(?:#{Util::LITERAL_REGEX})+\\z\z/.match?(regex_str)
/\A(?:#{Util::LITERAL_REGEX})+\\z\z/o.match?(regex_str)
end

def literal_at_end_with_dollar?(regex_str)
# is this regexp 'literal' in the sense of only matching literal
# chars, rather than using metachars like . and * and so on?
# also, is it anchored at the end of the string?
/\A(?:#{Util::LITERAL_REGEX})+\$\z/.match?(regex_str)
/\A(?:#{Util::LITERAL_REGEX})+\$\z/o.match?(regex_str)
end

def drop_start_metacharacter(regexp_string)
@@ -18,6 +18,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `<=` instead of `ancestors.include?`.'
RESTRICT_ON_SEND = %i[include?].freeze

def_node_matcher :ancestors_include_candidate?, <<~PATTERN
  (send (send $_subclass :ancestors) :include? $_superclass)
@@ -0,0 +1,74 @@
# frozen_string_literal: true

module RuboCop
module Cop
module Performance
# This cop identifies places where slicing arrays with semi-infinite ranges
# can be replaced by `Array#take` and `Array#drop`.
#
# @example
#   # bad
#   # array[..2]
#   # array[...2]
#   # array[2..]
#   # array[2...]
#   # array.slice(..2)
#
#   # good
#   array.take(3)
#   array.take(2)
#   array.drop(2)
#   array.drop(2)
#   array.take(3)
#
class ArraySemiInfiniteRangeSlice < Base
include RangeHelp
extend AutoCorrector
extend TargetRubyVersion

minimum_target_ruby_version 2.7

MSG = 'Use `%<prefer>s` instead of `%<current>s` with semi-infinite range.'

SLICE_METHODS = Set[:[], :slice].freeze
RESTRICT_ON_SEND = SLICE_METHODS

def_node_matcher :endless_range_slice?, <<~PATTERN
  (send $_ $%SLICE_METHODS $#endless_range?)
PATTERN

def_node_matcher :endless_range?, <<~PATTERN
  {
    ({irange erange} nil? (int positive?))
    ({irange erange} (int positive?) nil?)
  }
PATTERN

def on_send(node)
endless_range_slice?(node) do |receiver, method_name, range_node|
prefer = range_node.begin ? :drop : :take
message = format(MSG, prefer: prefer, current: method_name)

add_offense(node, message: message) do |corrector|
corrector.replace(node, correction(receiver, range_node))
end
end
end

private

def correction(receiver, range_node)
method_call = if range_node.begin
"drop(#{range_node.begin.value})"
elsif range_node.irange_type?
"take(#{range_node.end.value + 1})"
else
"take(#{range_node.end.value})"
end

"#{receiver.source}.#{method_call}"
end
end
end
end
end
@@ -20,6 +20,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Convert numeric argument to string before passing to `BigDecimal`.'
RESTRICT_ON_SEND = %i[BigDecimal].freeze

def_node_matcher :big_decimal_with_numeric_argument?, <<~PATTERN
  (send nil? :BigDecimal $numeric_type? ...)
@@ -28,6 +28,7 @@ module RuboCop

MSG = 'Use `bind_call(%<bind_arg>s%<comma>s%<call_args>s)` ' \
      'instead of `bind(%<bind_arg>s).call(%<call_args>s)`.'
RESTRICT_ON_SEND = %i[call].freeze

def_node_matcher :bind_with_call_method?, <<~PATTERN
  (send
@@ -0,0 +1,52 @@
# frozen_string_literal: true

module RuboCop
module Cop
module Performance
# This cop identifies unnecessary use of a `block_given?` where explicit check
# of block argument would suffice.
#
# @example
#   # bad
#   def method(&block)
#     do_something if block_given?
#   end
#
#   # good
#   def method(&block)
#     do_something if block
#   end
#
#   # good - block is reassigned
#   def method(&block)
#     block ||= -> { do_something }
#     warn "Using default ..." unless block_given?
#     # ...
#   end
#
class BlockGivenWithExplicitBlock < Base
extend AutoCorrector

RESTRICT_ON_SEND = %i[block_given?].freeze
MSG = 'Check block argument explicitly instead of using `block_given?`.'

def_node_matcher :reassigns_block_arg?, '`(lvasgn %1 ...)'

def on_send(node)
def_node = node.each_ancestor(:def, :defs).first
return unless def_node

block_arg = def_node.arguments.find(&:blockarg_type?)
return unless block_arg

block_arg_name = block_arg.loc.name.source.to_sym
return if reassigns_block_arg?(def_node, block_arg_name)

add_offense(node) do |corrector|
corrector.replace(node, block_arg_name)
end
end
end
end
end
end
@@ -19,10 +19,10 @@ module RuboCop
#   caller_locations(2..2).first
#   caller_locations(1..1).first
class Caller < Base
MSG_BRACE = 'Use `%<method>s(%<n>d..%<n>d).first`' \
            ' instead of `%<method>s[%<m>d]`.'
MSG_FIRST = 'Use `%<method>s(%<n>d..%<n>d).first`' \
            ' instead of `%<method>s.first`.'
extend AutoCorrector

MSG = 'Use `%<preferred_method>s` instead of `%<current_method>s`.'
RESTRICT_ON_SEND = %i[first []].freeze

def_node_matcher :slow_caller?, <<~PATTERN
  {
@@ -41,26 +41,24 @@ module RuboCop
def on_send(node)
return unless caller_with_scope_method?(node)

message = message(node)
add_offense(node, message: message)
end

private

def message(node)
method_name = node.receiver.method_name
caller_arg = node.receiver.first_argument
n = caller_arg ? int_value(caller_arg) : 1

if node.method?(:[])
m = int_value(node.first_argument)
n += m
format(MSG_BRACE, n: n, m: m, method: method_name)
else
format(MSG_FIRST, n: n, method: method_name)
end

preferred_method = "#{method_name}(#{n}..#{n}).first"

message = format(MSG, preferred_method: preferred_method, current_method: node.source)
add_offense(node, message: message) do |corrector|
corrector.replace(node, preferred_method)
end
end

private

def int_value(node)
node.children[0]
end
@@ -23,6 +23,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `%<good>s` instead of `%<bad>s`.'
RESTRICT_ON_SEND = %i[== eql? !=].freeze
CASE_METHODS = %i[downcase upcase].freeze

def_node_matcher :downcase_eq, <<~PATTERN
@@ -0,0 +1,68 @@
# frozen_string_literal: true

module RuboCop
module Cop
module Performance
# This cop finds regular expressions with dynamic components that are all constants.
#
# Ruby allocates a new Regexp object every time it executes a code containing such
# a regular expression. It is more efficient to extract it into a constant
# or add an `/o` option to perform `#{}` interpolation only once and reuse that
# Regexp object.
#
# @example
#
#   # bad
#   def tokens(pattern)
#     pattern.scan(TOKEN).reject { |token| token.match?(/\A#{SEPARATORS}\Z/) }
#   end
#
#   # good
#   ALL_SEPARATORS = /\A#{SEPARATORS}\Z/
#   def tokens(pattern)
#     pattern.scan(TOKEN).reject { |token| token.match?(ALL_SEPARATORS) }
#   end
#
#   # good
#   def tokens(pattern)
#     pattern.scan(TOKEN).reject { |token| token.match?(/\A#{SEPARATORS}\Z/o) }
#   end
#
class ConstantRegexp < Base
extend AutoCorrector

MSG = 'Extract this regexp into a constant or append an `/o` option to its options.'

def on_regexp(node)
return if within_const_assignment?(node) ||
          !include_interpolated_const?(node) ||
          node.single_interpolation?

add_offense(node) do |corrector|
corrector.insert_after(node, 'o')
end
end

private

def within_const_assignment?(node)
node.each_ancestor(:casgn).any?
end

def_node_matcher :regexp_escape?, <<~PATTERN
  (send
    (const nil? :Regexp) :escape const_type?)
PATTERN

def include_interpolated_const?(node)
return false unless node.interpolation?

node.each_child_node(:begin).all? do |begin_node|
inner_node = begin_node.children.first
inner_node && (inner_node.const_type? || regexp_escape?(inner_node))
end
end
end
end
end
end
@@ -42,6 +42,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `count` instead of `%<selector>s...%<counter>s`.'
RESTRICT_ON_SEND = %i[count length size].freeze

def_node_matcher :count_candidate?, <<~PATTERN
  {
@@ -51,6 +51,7 @@ module RuboCop
minimum_target_ruby_version 2.5

MSG = 'Use `%<prefer>s` instead of `%<current>s`.'
RESTRICT_ON_SEND = %i[gsub gsub! sub sub!].freeze

PREFERRED_METHODS = {
  gsub: :delete_prefix,
@@ -51,6 +51,7 @@ module RuboCop
minimum_target_ruby_version 2.5

MSG = 'Use `%<prefer>s` instead of `%<current>s`.'
RESTRICT_ON_SEND = %i[gsub gsub! sub sub!].freeze

PREFERRED_METHODS = {
  gsub: :delete_suffix,
@@ -4,8 +4,8 @@ module RuboCop
module Cop
module Performance
# This cop is used to identify usages of `first`, `last`, `[0]` or `[-1]`
# chained to `select`, `find_all`, or `find_all`
# and change them to use `detect` instead.
# chained to `select`, `find_all` or `filter` and change them to use
# `detect` instead.
#
# @example
#   # bad
@@ -39,6 +39,7 @@ module RuboCop
            '`%<first_method>s[%<index>i]`.'
INDEX_REVERSE_MSG = 'Use `reverse.%<prefer>s` instead of ' \
                    '`%<first_method>s[%<index>i]`.'
RESTRICT_ON_SEND = %i[first last []].freeze

def_node_matcher :detect_candidate?, <<~PATTERN
  {
@@ -47,6 +47,7 @@ module RuboCop

MSG = 'Use `String#end_with?` instead of a regex match anchored to ' \
      'the end of the string.'
RESTRICT_ON_SEND = %i[match =~ match?].freeze

def_node_matcher :redundant_regex?, <<~PATTERN
  {(send $!nil? {:match :=~ :match?} (regexp (str $#literal_at_end?) (regopt)))
@@ -47,6 +47,7 @@ module RuboCop
#
class FixedSize < Base
MSG = 'Do not compute the size of statically sized objects.'
RESTRICT_ON_SEND = %i[count length size].freeze

def_node_matcher :counter, <<~MATCHER
  (send ${array hash str sym} {:count :length :size} $...)
@@ -19,6 +19,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `flat_map` instead of `%<method>s...%<flatten>s`.'
RESTRICT_ON_SEND = %i[flatten flatten!].freeze
FLATTEN_MULTIPLE_LEVELS = ' Beware, `flat_map` only flattens 1 level ' \
                          'and `flatten` can be used to flatten ' \
                          'multiple levels.'
@@ -39,6 +39,8 @@ module RuboCop
class InefficientHashSearch < Base
extend AutoCorrector

RESTRICT_ON_SEND = %i[include?].freeze

def_node_matcher :inefficient_include?, <<~PATTERN
  (send (send $_ {:keys :values}) :include? _)
PATTERN
@@ -29,14 +29,14 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `%<good>s` instead of `%<bad>s`.'
ENUMERABLE_METHODS = (Enumerable.instance_methods + [:each]).freeze
RESTRICT_ON_SEND = (Enumerable.instance_methods + [:each]).freeze

def_node_matcher :readlines_on_class?, <<~PATTERN
  $(send $(send (const nil? {:IO :File}) :readlines ...) #enumerable_method?)
  $(send $(send (const nil? {:IO :File}) :readlines ...) _)
PATTERN

def_node_matcher :readlines_on_instance?, <<~PATTERN
  $(send $(send ${nil? !const_type?} :readlines ...) #enumerable_method? ...)
  $(send $(send ${nil? !const_type?} :readlines ...) _ ...)
PATTERN

def on_send(node)
@@ -55,10 +55,6 @@ module RuboCop

private

def enumerable_method?(node)
ENUMERABLE_METHODS.include?(node.to_sym)
end

def autocorrect(corrector, enumerable_call, readlines_call, receiver)
# We cannot safely correct `.readlines` method called on IO/File classes
# due to its signature and we are not sure with implicit receiver
@@ -0,0 +1,32 @@
# frozen_string_literal: true

module RuboCop
module Cop
module Performance
# This cop identifies places where methods are converted to blocks, with the
# use of `&method`, and passed as arguments to method calls.
# It is faster to replace those with explicit blocks, calling those methods inside.
#
# @example
#   # bad
#   array.map(&method(:do_something))
#   [1, 2, 3].each(&out.method(:puts))
#
#   # good
#   array.map { |x| do_something(x) }
#   [1, 2, 3].each { |x| out.puts(x) }
#
class MethodObjectAsBlock < Base
MSG = 'Use block explicitly instead of block-passing a method object.'

def_node_matcher :method_object_as_argument?, <<~PATTERN
  (^send (send _ :method sym))
PATTERN

def on_block_pass(node)
add_offense(node) if method_object_as_argument?(node)
end
end
end
end
end
@@ -30,6 +30,7 @@ module RuboCop
class OpenStruct < Base
MSG = 'Consider using `Struct` over `OpenStruct` ' \
      'to optimize the performance.'
RESTRICT_ON_SEND = %i[new].freeze

def_node_matcher :open_struct, <<~PATTERN
  (send (const {nil? cbase} :OpenStruct) :new ...)
@@ -28,6 +28,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `Range#cover?` instead of `Range#%<bad_method>s`.'
RESTRICT_ON_SEND = %i[include? member?].freeze

# TODO: If we traced out assignments of variables to their uses, we
# might pick up on a few more instances of this issue
@@ -80,11 +80,11 @@ module RuboCop
def calls_to_report(argname, body)
return [] if blockarg_assigned?(body, argname)

calls = to_enum(:blockarg_calls, body, argname)
blockarg_calls(body, argname).map do |call|
return [] if args_include_block_pass?(call)

return [] if calls.any? { |call| args_include_block_pass?(call) }

calls
call
end
end

def args_include_block_pass?(blockcall)
@@ -22,6 +22,7 @@ module RuboCop

MSG = 'Use `=~` in places where the `MatchData` returned by ' \
      '`#match` will not be used.'
RESTRICT_ON_SEND = %i[match].freeze

# 'match' is a fairly generic name, so we don't flag it unless we see
# a string or regexp literal on one side or the other
@@ -29,6 +29,7 @@ module RuboCop

AREF_ASGN = '%<receiver>s[%<key>s] = %<value>s'
MSG = 'Use `%<prefer>s` instead of `%<current>s`.'
RESTRICT_ON_SEND = %i[merge!].freeze

WITH_MODIFIER_CORRECTION = <<~RUBY
  %<keyword>s %<condition>s
@@ -44,10 +44,10 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `%<good_method>s` instead of `%<bad_method>s`.'
REPLACEABLE_METHODS = %i[[] slice first last take drop length size empty?].freeze
RESTRICT_ON_SEND = %i[[] slice first last take drop length size empty?].freeze

def_node_matcher :redundant_chars_call?, <<~PATTERN
  (send $(send _ :chars) $#replaceable_method? $...)
  (send $(send _ :chars) $_ $...)
PATTERN

def on_send(node)
@@ -66,10 +66,6 @@ module RuboCop

private

def replaceable_method?(method_name)
REPLACEABLE_METHODS.include?(method_name)
end

def offense_range(receiver, node)
range_between(receiver.loc.selector.begin_pos, node.loc.expression.end_pos)
end
@@ -17,6 +17,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `reverse_each` instead of `reverse.each`.'
RESTRICT_ON_SEND = %i[each].freeze
UNDERSCORE = '_'

def_node_matcher :reverse_each?, <<~MATCHER
@@ -21,6 +21,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `%<good_method>s` instead of `%<bad_method>s`.'
RESTRICT_ON_SEND = %i[first].freeze

def_node_matcher :reverse_first_candidate?, <<~PATTERN
  (send $(send _ :reverse) :first (int _)?)
@@ -39,6 +39,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `size` instead of `count`.'
RESTRICT_ON_SEND = %i[count].freeze

def_node_matcher :array?, <<~PATTERN
  {
@@ -22,6 +22,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `%<prefer>s` instead of `%<current>s`.'
RESTRICT_ON_SEND = %i[gsub gsub!].freeze

PREFERRED_METHODS = {
  gsub: :squeeze,
@@ -58,7 +59,7 @@ module RuboCop
private

def repeating_literal?(regex_str)
regex_str.match?(/\A(?:#{Util::LITERAL_REGEX})\+\z/)
regex_str.match?(/\A(?:#{Util::LITERAL_REGEX})\+\z/o)
end
end
end
@@ -47,6 +47,7 @@ module RuboCop

MSG = 'Use `String#start_with?` instead of a regex match anchored to ' \
      'the beginning of the string.'
RESTRICT_ON_SEND = %i[match =~ match?].freeze

def_node_matcher :redundant_regex?, <<~PATTERN
  {(send $!nil? {:match :=~ :match?} (regexp (str $#literal_at_start?) (regopt)))
@@ -23,6 +23,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `String#include?` instead of a regex match with literal-only pattern.'
RESTRICT_ON_SEND = %i[match =~ match?].freeze

def_node_matcher :redundant_regex?, <<~PATTERN
  {(send $!nil? {:match :=~ :match?} (regexp (str $#literal?) (regopt)))
@@ -47,7 +48,7 @@ module RuboCop
private

def literal?(regex_str)
regex_str.match?(/\A#{Util::LITERAL_REGEX}+\z/)
regex_str.match?(/\A#{Util::LITERAL_REGEX}+\z/o)
end
end
end
@@ -23,6 +23,7 @@ module RuboCop
extend AutoCorrector

MSG = 'Use `%<prefer>s` instead of `%<current>s`.'
RESTRICT_ON_SEND = %i[gsub gsub!].freeze
DETERMINISTIC_REGEX = /\A(?:#{LITERAL_REGEX})+\Z/.freeze
DELETE = 'delete'
TR = 'tr'
@@ -0,0 +1,236 @@
# frozen_string_literal: true

module RuboCop
module Cop
module Performance
# This cop identifies places where custom code finding the sum of elements
# in some Enumerable object can be replaced by `Enumerable#sum` method.
#
# This cop can change auto-correction scope depending on the value of
# `SafeAutoCorrect`.
# Its auto-correction is marked as safe by default (`SafeAutoCorrect: true`)
# to prevent `TypeError` in auto-corrected code when initial value is not
# specified as shown below:
#
# [source,ruby]
# ----
# ['a', 'b'].sum # => (String can't be coerced into Integer)
# ----
#
# Therefore, if the initial value is not specified, unsafe auto-correction will not occur.
#
# If you always want to enable auto-correction, you can set `SafeAutoCorrect: false`.
#
# [source,yaml]
# ----
# Performance/Sum:
#   SafeAutoCorrect: false
# ----
#
# Please note that the auto-correction command line option will be changed from
# `rubocop -a` to `rubocop -A`, which includes unsafe auto-correction.
#
# @example
#   # bad
#   [1, 2, 3].inject(:+)                        # These bad cases with no initial value are unsafe and
#   [1, 2, 3].inject(&:+)                       # will not be auto-corrected by default. If you want to
#   [1, 2, 3].reduce { |acc, elem| acc + elem } # auto-correct them, you can set `SafeAutoCorrect: false`.
#   [1, 2, 3].reduce(10, :+)
#   [1, 2, 3].map { |elem| elem ** 2 }.sum
#   [1, 2, 3].collect(&:count).sum(10)
#
#   # good
#   [1, 2, 3].sum
#   [1, 2, 3].sum(10)
#   [1, 2, 3].sum { |elem| elem ** 2 }
#   [1, 2, 3].sum(10, &:count)
#
class Sum < Base
include RangeHelp
extend AutoCorrector

MSG = 'Use `%<good_method>s` instead of `%<bad_method>s`.'
MSG_IF_NO_INIT_VALUE =
  'Use `%<good_method>s` instead of `%<bad_method>s`, unless calling `%<bad_method>s` on an empty array.'
RESTRICT_ON_SEND = %i[inject reduce sum].freeze

def_node_matcher :sum_candidate?, <<~PATTERN
  (send _ ${:inject :reduce} $_init ? ${(sym :+) (block_pass (sym :+))})
PATTERN

def_node_matcher :sum_map_candidate?, <<~PATTERN
  (send
    {
      (block $(send _ {:map :collect}) ...)
      $(send _ {:map :collect} (block_pass _))
    }
  :sum $_init ?)
PATTERN

def_node_matcher :sum_with_block_candidate?, <<~PATTERN
  (block
    $(send _ {:inject :reduce} $_init ?)
    (args (arg $_acc) (arg $_elem))
    $send)
PATTERN

def_node_matcher :acc_plus_elem?, <<~PATTERN
  (send (lvar %1) :+ (lvar %2))
PATTERN
alias elem_plus_acc? acc_plus_elem?

def on_send(node)
return if empty_array_literal?(node)

handle_sum_candidate(node)
handle_sum_map_candidate(node)
end

def on_block(node)
sum_with_block_candidate?(node) do |send, init, var_acc, var_elem, body|
if acc_plus_elem?(body, var_acc, var_elem) || elem_plus_acc?(body, var_elem, var_acc)
range = sum_block_range(send, node)
message = build_block_message(send, init, var_acc, var_elem, body)

add_offense(range, message: message) do |corrector|
autocorrect(corrector, init, range)
end
end
end
end

private

def handle_sum_candidate(node)
sum_candidate?(node) do |method, init, operation|
range = sum_method_range(node)
message = build_method_message(node, method, init, operation)

add_offense(range, message: message) do |corrector|
autocorrect(corrector, init, range)
end
end
end

def handle_sum_map_candidate(node)
sum_map_candidate?(node) do |map, init|
next if node.block_literal? || node.block_argument?

message = build_sum_map_message(map.method_name, init)

add_offense(sum_map_range(map, node), message: message) do |corrector|
autocorrect_sum_map(corrector, node, map, init)
end
end
end

def empty_array_literal?(node)
receiver = node.children.first
array_literal?(node) && receiver && receiver.children.empty?
end

def array_literal?(node)
receiver = node.children.first
receiver&.literal? && receiver&.array_type?
end

def autocorrect(corrector, init, range)
return if init.empty? && safe_autocorrect?

replacement = build_good_method(init)

corrector.replace(range, replacement)
end

def autocorrect_sum_map(corrector, sum, map, init)
sum_range = method_call_with_args_range(sum)
map_range = method_call_with_args_range(map)

block_pass = map.last_argument if map.last_argument&.block_pass_type?
replacement = build_good_method(init, block_pass)

corrector.remove(sum_range)
corrector.replace(map_range, ".#{replacement}")
end

def sum_method_range(node)
range_between(node.loc.selector.begin_pos, node.loc.end.end_pos)
end

def sum_map_range(map, sum)
range_between(map.loc.selector.begin_pos, sum.source_range.end.end_pos)
end

def sum_block_range(send, node)
range_between(send.loc.selector.begin_pos, node.loc.end.end_pos)
end

def build_method_message(node, method, init, operation)
good_method = build_good_method(init)
bad_method = build_method_bad_method(init, method, operation)
msg = if init.empty? && !array_literal?(node)
MSG_IF_NO_INIT_VALUE
else
MSG
end
format(msg, good_method: good_method, bad_method: bad_method)
end

def build_sum_map_message(method, init)
sum_method = build_good_method(init)
good_method = "#{sum_method} { ... }"
bad_method = "#{method} { ... }.#{sum_method}"
format(MSG, good_method: good_method, bad_method: bad_method)
end

def build_block_message(send, init, var_acc, var_elem, body)
good_method = build_good_method(init)
bad_method = build_block_bad_method(send.method_name, init, var_acc, var_elem, body)
format(MSG, good_method: good_method, bad_method: bad_method)
end

def build_good_method(init, block_pass = nil)
good_method = 'sum'

args = []
unless init.empty?
init = init.first
args << init.source unless init.int_type? && init.value.zero?
end
args << block_pass.source if block_pass
good_method += "(#{args.join(', ')})" unless args.empty?
good_method
end

def build_method_bad_method(init, method, operation)
bad_method = "#{method}("
unless init.empty?
init = init.first
bad_method += "#{init.source}, "
end
bad_method += if operation.block_pass_type?
'&:+)'
else
':+)'
end
bad_method
end

def build_block_bad_method(method, init, var_acc, var_elem, body)
bad_method = method.to_s

unless init.empty?
init = init.first
bad_method += "(#{init.source})"
end
bad_method += " { |#{var_acc}, #{var_elem}| #{body.source} }"
bad_method
end

def method_call_with_args_range(node)
node.receiver.source_range.end.join(node.source_range.end)
end
end
end
end
end
@@ -23,6 +23,7 @@ module RuboCop
MESSAGE = 'Use `Array.new(%<count>s)` with a block ' \
          'instead of `.times.%<map_or_collect>s`'
MESSAGE_ONLY_IF = 'only if `%<count>s` is always 0 or more'
RESTRICT_ON_SEND = %i[map collect].freeze

def on_send(node)
check(node)
@@ -10,6 +10,7 @@ module RuboCop
# NOTE: `String.new` (without operator) is not exactly the same as `+''`.
# These differ in encoding. `String.new.encoding` is always `ASCII-8BIT`.
# However, `(+'').encoding` is the same as script encoding (e.g. `UTF-8`).
# Therefore, auto-correction is unsafe.
# So, if you expect `ASCII-8BIT` encoding, disable this cop.
#
# @example
@@ -24,7 +25,10 @@ module RuboCop
#   +'something'
#   +''
class UnfreezeString < Base
extend AutoCorrector

MSG = 'Use unary plus to get an unfrozen string literal.'
RESTRICT_ON_SEND = %i[dup new].freeze

def_node_matcher :dup_string?, <<~PATTERN
  (send {str dstr} :dup)
@@ -38,7 +42,21 @@ module RuboCop
PATTERN

def on_send(node)
add_offense(node) if dup_string?(node) || string_new?(node)
return unless dup_string?(node) || string_new?(node)

add_offense(node) do |corrector|
corrector.replace(node, "+#{string_value(node)}")
end
end

private

def string_value(node)
if node.receiver.source == 'String' && node.method?(:new)
node.arguments.empty? ? "''" : node.first_argument.source
else
node.receiver.source
end
end
end
end
@@ -18,6 +18,7 @@ module RuboCop

MSG = 'Use `%<double_colon>sURI::DEFAULT_PARSER` instead of ' \
      '`%<double_colon>sURI::Parser.new`.'
RESTRICT_ON_SEND = %i[new].freeze

def_node_matcher :uri_parser_new?, <<~PATTERN
  (send
@@ -4,13 +4,16 @@ require_relative 'mixin/regexp_metacharacter'
require_relative 'mixin/sort_block'

require_relative 'performance/ancestors_include'
require_relative 'performance/array_semi_infinite_range_slice'
require_relative 'performance/big_decimal_with_numeric_argument'
require_relative 'performance/bind_call'
require_relative 'performance/block_given_with_explicit_block'
require_relative 'performance/caller'
require_relative 'performance/case_when_splat'
require_relative 'performance/casecmp'
require_relative 'performance/collection_literal_in_loop'
require_relative 'performance/compare_with_block'
require_relative 'performance/constant_regexp'
require_relative 'performance/count'
require_relative 'performance/delete_prefix'
require_relative 'performance/delete_suffix'
@@ -20,6 +23,7 @@ require_relative 'performance/end_with'
require_relative 'performance/fixed_size'
require_relative 'performance/flat_map'
require_relative 'performance/inefficient_hash_search'
require_relative 'performance/method_object_as_block'
require_relative 'performance/open_struct'
require_relative 'performance/range_include'
require_relative 'performance/io_readlines'
@@ -0,0 +1,14 @@
# frozen_string_literal: true

module RuboCop
module Performance
# This module holds the RuboCop Performance version information.
module Version
STRING = '1.9.0'

def self.document_version
STRING.match('\d+\.\d+').to_s
end
end
end
end
@@ -1,300 +0,0 @@
module TZInfo
# Use send as a workaround for erroneous 'wrong number of arguments' errors
# with JRuby 9.0.5.0 when calling methods with Java implementations. See #114.
send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt)

# An InvalidZoneinfoFile exception is raised if an attempt is made to load an
# invalid zoneinfo file.
class InvalidZoneinfoFile < StandardError
end

# Represents a timezone defined by a compiled zoneinfo TZif (\0, 2 or 3) file.
#
# @private
class ZoneinfoTimezoneInfo < TransitionDataTimezoneInfo #:nodoc:

# Minimum supported timestamp (inclusive).
#
# Time.utc(1700, 1, 1).to_i
MIN_TIMESTAMP = -8520336000

# Maximum supported timestamp (exclusive).
#
# Time.utc(2500, 1, 1).to_i
MAX_TIMESTAMP = 16725225600

# Constructs the new ZoneinfoTimezoneInfo with an identifier and path
# to the file.
def initialize(identifier, file_path)
super(identifier)

File.open(file_path, 'rb') do |file|
parse(file)
end
end

private
# Unpack will return unsigned 32-bit integers. Translate to
# signed 32-bit.
def make_signed_int32(long)
long >= 0x80000000 ? long - 0x100000000 : long
end

# Unpack will return a 64-bit integer as two unsigned 32-bit integers
# (most significant first). Translate to signed 64-bit
def make_signed_int64(high, low)
unsigned = (high << 32) | low
unsigned >= 0x8000000000000000 ? unsigned - 0x10000000000000000 : unsigned
end

# Read bytes from file and check that the correct number of bytes could
# be read. Raises InvalidZoneinfoFile if the number of bytes didn't match
# the number requested.
def check_read(file, bytes)
result = file.read(bytes)

unless result && result.length == bytes
raise InvalidZoneinfoFile, "Expected #{bytes} bytes reading '#{file.path}', but got #{result ? result.length : 0} bytes"
end

result
end

# Zoneinfo files don't include the offset from standard time (std_offset)
# for DST periods. Derive the base offset (utc_offset) where DST is
# observed from either the previous or next non-DST period.
#
# Returns the index of the offset to be used prior to the first
# transition.
def derive_offsets(transitions, offsets)
# The first non-DST offset (if there is one) is the offset observed
# before the first transition. Fallback to the first DST offset if there
# are no non-DST offsets.
first_non_dst_offset_index = offsets.index {|o| !o[:is_dst] }
first_offset_index = first_non_dst_offset_index || 0
return first_offset_index if transitions.empty?

# Determine the utc_offset of the next non-dst offset at each transition.
utc_offset_from_next = nil

transitions.reverse_each do |transition|
offset = offsets[transition[:offset]]
if offset[:is_dst]
transition[:utc_offset_from_next] = utc_offset_from_next if utc_offset_from_next
else
utc_offset_from_next = offset[:utc_total_offset]
end
end

utc_offset_from_previous = first_non_dst_offset_index ? offsets[first_non_dst_offset_index][:utc_total_offset] : nil
defined_offsets = {}

transitions.each do |transition|
offset_index = transition[:offset]
offset = offsets[offset_index]
utc_total_offset = offset[:utc_total_offset]

if offset[:is_dst]
utc_offset_from_next = transition[:utc_offset_from_next]

difference_to_previous = (utc_total_offset - (utc_offset_from_previous || utc_total_offset)).abs
difference_to_next = (utc_total_offset - (utc_offset_from_next || utc_total_offset)).abs

utc_offset = if difference_to_previous == 3600
utc_offset_from_previous
elsif difference_to_next == 3600
utc_offset_from_next
elsif difference_to_previous > 0 && difference_to_next > 0
difference_to_previous < difference_to_next ? utc_offset_from_previous : utc_offset_from_next
elsif difference_to_previous > 0
utc_offset_from_previous
elsif difference_to_next > 0
utc_offset_from_next
else
# No difference, assume a 1 hour offset from standard time.
utc_total_offset - 3600
end

if !offset[:utc_offset]
offset[:utc_offset] = utc_offset
defined_offsets[offset] = offset_index
elsif offset[:utc_offset] != utc_offset
# An earlier transition has already derived a different
# utc_offset. Define a new offset or reuse an existing identically
# defined offset.
new_offset = offset.dup
new_offset[:utc_offset] = utc_offset

offset_index = defined_offsets[new_offset]

unless offset_index
offsets << new_offset
offset_index = offsets.length - 1
defined_offsets[new_offset] = offset_index
end

transition[:offset] = offset_index
end
else
utc_offset_from_previous = utc_total_offset
end
end

first_offset_index
end

# Defines an offset for the timezone based on the given index and offset
# Hash.
def define_offset(index, offset)
utc_total_offset = offset[:utc_total_offset]
utc_offset = offset[:utc_offset]

if utc_offset
# DST offset with base utc_offset derived by derive_offsets.
std_offset = utc_total_offset - utc_offset
elsif offset[:is_dst]
# DST offset unreferenced by a transition (offset in use before the
# first transition). No derived base UTC offset, so assume 1 hour
# DST.
utc_offset = utc_total_offset - 3600
std_offset = 3600
else
# Non-DST offset.
utc_offset = utc_total_offset
std_offset = 0
end

offset index, utc_offset, std_offset, offset[:abbr].untaint.to_sym
end

# Parses a zoneinfo file and initializes the DataTimezoneInfo structures.
def parse(file)
magic, version, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt =
  check_read(file, 44).unpack('a4 a x15 NNNNNN')

if magic != 'TZif'
raise InvalidZoneinfoFile, "The file '#{file.path}' does not start with the expected header."
end

if (version == '2' || version == '3') && RubyCoreSupport.time_supports_64bit
# Skip the first 32-bit section and read the header of the second 64-bit section
file.seek(timecnt * 5 + typecnt * 6 + charcnt + leapcnt * 8 + ttisgmtcnt + ttisstdcnt, IO::SEEK_CUR)

prev_version = version

magic, version, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt =
  check_read(file, 44).unpack('a4 a x15 NNNNNN')

unless magic == 'TZif' && (version == prev_version)
raise InvalidZoneinfoFile, "The file '#{file.path}' contains an invalid 64-bit section header."
end

using_64bit = true
elsif version != '3' && version != '2' && version != "\0"
raise InvalidZoneinfoFile, "The file '#{file.path}' contains a version of the zoneinfo format that is not currently supported."
else
using_64bit = false
end

unless leapcnt == 0
raise InvalidZoneinfoFile, "The zoneinfo file '#{file.path}' contains leap second data. TZInfo requires zoneinfo files that omit leap seconds."
end

transitions = []

if using_64bit
timecnt.times do |i|
high, low = check_read(file, 8).unpack('NN'.freeze)
transition_time = make_signed_int64(high, low)
transitions << {:at => transition_time}
end
else
timecnt.times do |i|
transition_time = make_signed_int32(check_read(file, 4).unpack('N'.freeze)[0])
transitions << {:at => transition_time}
end
end

timecnt.times do |i|
localtime_type = check_read(file, 1).unpack('C'.freeze)[0]
transitions[i][:offset] = localtime_type
end

offsets = []

typecnt.times do |i|
gmtoff, isdst, abbrind = check_read(file, 6).unpack('NCC'.freeze)
gmtoff = make_signed_int32(gmtoff)
isdst = isdst == 1
offset = {:utc_total_offset => gmtoff, :is_dst => isdst, :abbr_index => abbrind}

unless isdst
offset[:utc_offset] = gmtoff
offset[:std_offset] = 0
end

offsets << offset
end

abbrev = check_read(file, charcnt)

offsets.each do |o|
abbrev_start = o[:abbr_index]
raise InvalidZoneinfoFile, "Abbreviation index is out of range in file '#{file.path}'" unless abbrev_start < abbrev.length

abbrev_end = abbrev.index("\0", abbrev_start)
raise InvalidZoneinfoFile, "Missing abbreviation null terminator in file '#{file.path}'" unless abbrev_end

o[:abbr] = RubyCoreSupport.force_encoding(abbrev[abbrev_start...abbrev_end], 'UTF-8')
end

transitions.each do |t|
if t[:offset] < 0 || t[:offset] >= offsets.length
raise InvalidZoneinfoFile, "Invalid offset referenced by transition in file '#{file.path}'."
end
end

# Derive the offsets from standard time (std_offset).
first_offset_index = derive_offsets(transitions, offsets)

define_offset(first_offset_index, offsets[first_offset_index])

offsets.each_with_index do |o, i|
define_offset(i, o) unless i == first_offset_index
end

if !using_64bit && !RubyCoreSupport.time_supports_negative
# Filter out transitions that are not supported by Time on this
# platform.

# Move the last transition before the epoch up to the epoch. This
# allows for accurate conversions for all supported timestamps on the
# platform.

before_epoch, after_epoch = transitions.partition {|t| t[:at] < 0}

if before_epoch.length > 0 && after_epoch.length > 0 && after_epoch.first[:at] != 0
last_before = before_epoch.last
last_before[:at] = 0
transitions = [last_before] + after_epoch
else
transitions = after_epoch
end
end

# Ignore transitions that occur outside of a defined window. The
# transition index cannot handle a large range of transition times.
#
# This is primarily intended to ignore the far in the past transition
# added in zic 2014c (at timestamp -2**63 in zic 2014c and at the
# approximate time of the big bang from zic 2014d).
transitions.each do |t|
at = t[:at]
if at >= MIN_TIMESTAMP && at < MAX_TIMESTAMP
time = Time.at(at).utc
transition time.year, time.mon, t[:offset], at
end
end
end
end
end
@ -10,6 +10,8 @@ require 'tzinfo/timezone_definition'
|
||||
|
||||
require 'tzinfo/timezone_offset'
|
||||
require 'tzinfo/timezone_transition'
|
||||
require 'tzinfo/transition_rule'
|
||||
require 'tzinfo/annual_rules'
|
||||
require 'tzinfo/timezone_transition_definition'
|
||||
|
||||
require 'tzinfo/timezone_index_definition'
|
||||
@ -22,6 +24,7 @@ require 'tzinfo/zoneinfo_timezone_info'
|
||||
|
||||
require 'tzinfo/data_source'
|
||||
require 'tzinfo/ruby_data_source'
|
||||
require 'tzinfo/posix_time_zone_parser'
|
||||
require 'tzinfo/zoneinfo_data_source'
|
||||
|
||||
require 'tzinfo/timezone_period'
|
||||
51
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/annual_rules.rb
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
module TZInfo
|
||||
# A set of rules that define when transitions occur in time zones with
|
||||
# annually occurring daylight savings time.
|
||||
#
|
||||
# @private
|
||||
class AnnualRules #:nodoc:
|
||||
# Returned by #transitions. #offset is the TimezoneOffset that applies
|
||||
# from the UTC TimeOrDateTime #at. #previous_offset is the prior
|
||||
# TimezoneOffset.
|
||||
Transition = Struct.new(:offset, :previous_offset, :at)
|
||||
|
||||
# The standard offset that applies when daylight savings time is not in
|
||||
# force.
|
||||
attr_reader :std_offset
|
||||
|
||||
# The offset that applies when daylight savings time is in force.
|
||||
attr_reader :dst_offset
|
||||
|
||||
# The rule that determines when daylight savings time starts.
|
||||
attr_reader :dst_start_rule
|
||||
|
||||
# The rule that determines when daylight savings time ends.
|
||||
attr_reader :dst_end_rule
|
||||
|
||||
# Initializes a new {AnnualRules} instance.
|
||||
def initialize(std_offset, dst_offset, dst_start_rule, dst_end_rule)
|
||||
@std_offset = std_offset
|
||||
@dst_offset = dst_offset
|
||||
@dst_start_rule = dst_start_rule
|
||||
@dst_end_rule = dst_end_rule
|
||||
end
|
||||
|
||||
# Returns the transitions between standard and daylight savings time for a
|
||||
# given year. The results are ordered by time of occurrence (earliest to
|
||||
# latest).
|
||||
def transitions(year)
|
||||
start_dst = apply_rule(@dst_start_rule, @std_offset, @dst_offset, year)
|
||||
end_dst = apply_rule(@dst_end_rule, @dst_offset, @std_offset, year)
|
||||
|
||||
end_dst.at < start_dst.at ? [end_dst, start_dst] : [start_dst, end_dst]
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# Applies a given rule between offsets on a year.
|
||||
def apply_rule(rule, from_offset, to_offset, year)
|
||||
at = rule.at(from_offset, year)
|
||||
Transition.new(to_offset, from_offset, at)
|
||||
end
|
||||
end
|
||||
end
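# Editorial example, not part of the vendored gem: driving the private
# AnnualRules API added in tzinfo 1.2.8. The offsets and rules below are an
# assumption approximating US Eastern time; adjust them for other zones.
require 'tzinfo'

std = TZInfo::TimezoneOffset.new(-18000, 0, :EST)                 # UTC-5, standard time
dst = TZInfo::TimezoneOffset.new(-18000, 3600, :EDT)              # UTC-5 plus 1 hour of DST
dst_start = TZInfo::DayOfMonthTransitionRule.new(3, 2, 0, 7200)   # 2nd Sunday in March, 02:00
dst_end   = TZInfo::DayOfMonthTransitionRule.new(11, 1, 0, 7200)  # 1st Sunday in November, 02:00

rules = TZInfo::AnnualRules.new(std, dst, dst_start, dst_end)
rules.transitions(2021).each do |t|
  puts "#{t.at.to_time.utc} -> #{t.offset.abbreviation}"
end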
|
||||
136
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/posix_time_zone_parser.rb
vendored
Normal file
@ -0,0 +1,136 @@
|
||||
# encoding: UTF-8
|
||||
# frozen_string_literal: true
|
||||
|
||||
require 'strscan'
|
||||
|
||||
module TZInfo
|
||||
# An {InvalidPosixTimeZone} exception is raised if an invalid POSIX-style
|
||||
# time zone string is encountered.
|
||||
#
|
||||
# @private
|
||||
class InvalidPosixTimeZone < StandardError #:nodoc:
|
||||
end
|
||||
|
||||
# A parser for POSIX-style TZ strings used in zoneinfo files and specified
|
||||
# by tzfile.5 and tzset.3.
|
||||
#
|
||||
# @private
|
||||
class PosixTimeZoneParser #:nodoc:
|
||||
# Parses a POSIX-style TZ string, returning either a TimezoneOffset or
|
||||
# an AnnualRules instance.
|
||||
def parse(tz_string)
|
||||
raise InvalidPosixTimeZone unless tz_string.kind_of?(String)
|
||||
return nil if tz_string.empty?
|
||||
|
||||
s = StringScanner.new(tz_string)
|
||||
check_scan(s, /([^-+,\d<][^-+,\d]*) | <([^>]+)>/x)
|
||||
std_abbrev = s[1] || s[2]
|
||||
check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/)
|
||||
std_offset = get_offset_from_hms(s[1], s[2], s[3])
|
||||
|
||||
if s.scan(/([^-+,\d<][^-+,\d]*) | <([^>]+)>/x)
|
||||
dst_abbrev = s[1] || s[2]
|
||||
|
||||
if s.scan(/([-+]?\d+)(?::(\d+)(?::(\d+))?)?/)
|
||||
dst_offset = get_offset_from_hms(s[1], s[2], s[3])
|
||||
else
|
||||
# POSIX is negative for ahead of UTC.
|
||||
dst_offset = std_offset - 3600
|
||||
end
|
||||
|
||||
dst_difference = std_offset - dst_offset
|
||||
|
||||
start_rule = parse_rule(s, 'start')
|
||||
end_rule = parse_rule(s, 'end')
|
||||
|
||||
raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'." if s.rest?
|
||||
|
||||
if start_rule.is_always_first_day_of_year? && start_rule.transition_at == 0 &&
|
||||
end_rule.is_always_last_day_of_year? && end_rule.transition_at == 86400 + dst_difference
|
||||
# Constant daylight savings time.
|
||||
# POSIX is negative for ahead of UTC.
|
||||
TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev.to_sym)
|
||||
else
|
||||
AnnualRules.new(
|
||||
TimezoneOffset.new(-std_offset, 0, std_abbrev.to_sym),
|
||||
TimezoneOffset.new(-std_offset, dst_difference, dst_abbrev.to_sym),
|
||||
start_rule,
|
||||
end_rule)
|
||||
end
|
||||
elsif !s.rest?
|
||||
# Constant standard time.
|
||||
# POSIX is negative for ahead of UTC.
|
||||
TimezoneOffset.new(-std_offset, 0, std_abbrev.to_sym)
|
||||
else
|
||||
raise InvalidPosixTimeZone, "Expected the end of a POSIX-style time zone string but found '#{s.rest}'."
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# Parses the rule from the TZ string, returning a TransitionRule.
|
||||
def parse_rule(s, type)
|
||||
check_scan(s, /,(?: (?: J(\d+) ) | (\d+) | (?: M(\d+)\.(\d)\.(\d) ) )/x)
|
||||
julian_day_of_year = s[1]
|
||||
absolute_day_of_year = s[2]
|
||||
month = s[3]
|
||||
week = s[4]
|
||||
day_of_week = s[5]
|
||||
|
||||
if s.scan(/\//)
|
||||
check_scan(s, /([-+]?\d+)(?::(\d+)(?::(\d+))?)?/)
|
||||
transition_at = get_seconds_after_midnight_from_hms(s[1], s[2], s[3])
|
||||
else
|
||||
transition_at = 7200
|
||||
end
|
||||
|
||||
begin
|
||||
if julian_day_of_year
|
||||
JulianDayOfYearTransitionRule.new(julian_day_of_year.to_i, transition_at)
|
||||
elsif absolute_day_of_year
|
||||
AbsoluteDayOfYearTransitionRule.new(absolute_day_of_year.to_i, transition_at)
|
||||
elsif week == '5'
|
||||
LastDayOfMonthTransitionRule.new(month.to_i, day_of_week.to_i, transition_at)
|
||||
else
|
||||
DayOfMonthTransitionRule.new(month.to_i, week.to_i, day_of_week.to_i, transition_at)
|
||||
end
|
||||
rescue ArgumentError => e
|
||||
raise InvalidPosixTimeZone, "Invalid #{type} rule in POSIX-style time zone string: #{e}"
|
||||
end
|
||||
end
|
||||
|
||||
# Returns an offset in seconds from hh:mm:ss values. The value can be
|
||||
# negative. -02:33:12 would represent 2 hours, 33 minutes and 12 seconds
|
||||
# ahead of UTC.
|
||||
def get_offset_from_hms(h, m, s)
|
||||
h = h.to_i
|
||||
m = m.to_i
|
||||
s = s.to_i
|
||||
raise InvalidPosixTimeZone, "Invalid minute #{m} in offset for POSIX-style time zone string." if m > 59
|
||||
raise InvalidPosixTimeZone, "Invalid second #{s} in offset for POSIX-style time zone string." if s > 59
|
||||
magnitude = (h.abs * 60 + m) * 60 + s
|
||||
h < 0 ? -magnitude : magnitude
|
||||
end
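# Worked example (editorial, not in the gem source): for "-2:33:12" the
# captures are h = "-2", m = "33", s = "12", so the magnitude is
# (2 * 60 + 33) * 60 + 12 = 9192 and the result is -9192 seconds,
# i.e. 2 hours, 33 minutes and 12 seconds ahead of UTC in POSIX terms.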
|
||||
|
||||
# Returns the seconds from midnight from hh:mm:ss values. Hours can exceed
|
||||
# 24 for a time on the following day. Hours can be negative to subtract
|
||||
# hours from midnight on the given day. -02:33:12 represents 22:33:12 on
|
||||
# the prior day.
|
||||
def get_seconds_after_midnight_from_hms(h, m, s)
|
||||
h = h.to_i
|
||||
m = m.to_i
|
||||
s = s.to_i
|
||||
raise InvalidPosixTimeZone, "Invalid minute #{m} in time for POSIX-style time zone string." if m > 59
|
||||
raise InvalidPosixTimeZone, "Invalid second #{s} in time for POSIX-style time zone string." if s > 59
|
||||
(h * 3600) + m * 60 + s
|
||||
end
|
||||
|
||||
# Scans for a pattern and raises an exception if the pattern does not
|
||||
# match the input.
|
||||
def check_scan(s, pattern)
|
||||
result = s.scan(pattern)
|
||||
raise InvalidPosixTimeZone, "Expected '#{s.rest}' to match #{pattern} in POSIX-style time zone string." unless result
|
||||
result
|
||||
end
|
||||
end
|
||||
end
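# Editorial example, not part of the vendored gem: parsing the POSIX-style TZ
# string that terminates a version 2 or 3 zoneinfo file. The TZ string below is
# illustrative (US Eastern time).
require 'tzinfo'

parser = TZInfo::PosixTimeZoneParser.new
rules  = parser.parse('EST5EDT,M3.2.0,M11.1.0')

rules.class                        # => TZInfo::AnnualRules
rules.std_offset.utc_total_offset  # => -18000 (EST, UTC-5)
rules.dst_offset.utc_total_offset  # => -14400 (EDT, UTC-4)
rules.transitions(2021).length     # => 2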
|
||||
@ -160,6 +160,17 @@ module TZInfo
|
||||
end
|
||||
end
|
||||
alias :day :mday
|
||||
|
||||
# Returns the day of the week (0..6 for Sunday to Saturday).
|
||||
def wday
|
||||
if @time
|
||||
@time.wday
|
||||
elsif @datetime
|
||||
@datetime.wday
|
||||
else
|
||||
to_time.wday
|
||||
end
|
||||
end
|
||||
|
||||
# Returns the hour of the day (0..23).
|
||||
def hour
|
||||
325
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/transition_rule.rb
vendored
Normal file
@ -0,0 +1,325 @@
|
||||
require 'date'
|
||||
|
||||
module TZInfo
|
||||
# Base class for rules defining the transition between standard and daylight
|
||||
# savings time.
|
||||
class TransitionRule #:nodoc:
|
||||
# Returns the number of seconds after midnight local time on the day
|
||||
# identified by the rule at which the transition occurs. Can be negative to
|
||||
# denote a time on the prior day. Can be greater than or equal to 86,400 to
|
||||
# denote a time of the following day.
|
||||
attr_reader :transition_at
|
||||
|
||||
# Initializes a new TransitionRule.
|
||||
def initialize(transition_at)
|
||||
raise ArgumentError, 'Invalid transition_at' unless transition_at.kind_of?(Integer)
|
||||
@transition_at = transition_at
|
||||
end
|
||||
|
||||
# Calculates the UTC time of the transition from a given offset on a given
|
||||
# year.
|
||||
def at(offset, year)
|
||||
day = get_day(year)
|
||||
day.add_with_convert(@transition_at - offset.utc_total_offset)
|
||||
end
|
||||
|
||||
# Determines if this TransitionRule is equal to another instance.
|
||||
def ==(r)
|
||||
r.kind_of?(TransitionRule) && @transition_at == r.transition_at
|
||||
end
|
||||
alias eql? ==
|
||||
|
||||
# Returns a hash based on hash_args (defaulting to transition_at).
|
||||
def hash
|
||||
hash_args.hash
|
||||
end
|
||||
|
||||
protected
|
||||
|
||||
# Returns an Array of parameters that will influence the output of hash.
|
||||
def hash_args
|
||||
[@transition_at]
|
||||
end
|
||||
|
||||
def new_time_or_datetime(year, month = 1, day = 1)
|
||||
result = if ((year >= 2039 || (year == 2038 && (month >= 2 || (month == 1 && day >= 20)))) && !RubyCoreSupport.time_supports_64bit) ||
|
||||
(year < 1970 && !RubyCoreSupport.time_supports_negative)
|
||||
|
||||
# Time handles 29 February on a non-leap year as 1 March.
|
||||
# DateTime rejects. Advance manually.
|
||||
if month == 2 && day == 29 && !Date.gregorian_leap?(year)
|
||||
month = 3
|
||||
day = 1
|
||||
end
|
||||
|
||||
RubyCoreSupport.datetime_new(year, month, day)
|
||||
else
|
||||
Time.utc(year, month, day)
|
||||
end
|
||||
|
||||
TimeOrDateTime.wrap(result)
|
||||
end
|
||||
end
|
||||
|
||||
# A base class for transition rules that activate based on an integer day of
|
||||
# the year.
|
||||
#
|
||||
# @private
|
||||
class DayOfYearTransitionRule < TransitionRule #:nodoc:
|
||||
# Initializes a new DayOfYearTransitionRule.
|
||||
def initialize(day, transition_at)
|
||||
super(transition_at)
|
||||
raise ArgumentError, 'Invalid day' unless day.kind_of?(Integer)
|
||||
@seconds = day * 86400
|
||||
end
|
||||
|
||||
# Determines if this DayOfYearTransitionRule is equal to another instance.
|
||||
def ==(r)
|
||||
super(r) && r.kind_of?(DayOfYearTransitionRule) && @seconds == r.seconds
|
||||
end
|
||||
alias eql? ==
|
||||
|
||||
protected
|
||||
|
||||
# @return [Integer] the day multiplied by the number of seconds in a day.
|
||||
attr_reader :seconds
|
||||
|
||||
# Returns an Array of parameters that will influence the output of hash.
|
||||
def hash_args
|
||||
[@seconds] + super
|
||||
end
|
||||
end
|
||||
|
||||
# Defines transitions that occur on the zero-based nth day of the year.
|
||||
#
|
||||
# Day 0 is 1 January.
|
||||
#
|
||||
# Leap days are counted. Day 59 will be 29 February on a leap year and 1 March
|
||||
# on a non-leap year. Day 365 will be 31 December on a leap year and 1 January
|
||||
# the following year on a non-leap year.
|
||||
#
|
||||
# @private
|
||||
class AbsoluteDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc:
|
||||
# Initializes a new AbsoluteDayOfYearTransitionRule.
|
||||
def initialize(day, transition_at = 0)
|
||||
super(day, transition_at)
|
||||
raise ArgumentError, 'Invalid day' unless day >= 0 && day <= 365
|
||||
end
|
||||
|
||||
# Returns true if the day specified by this transition is the first in the
|
||||
# year (a day number of 0), otherwise false.
|
||||
def is_always_first_day_of_year?
|
||||
seconds == 0
|
||||
end
|
||||
|
||||
# Returns false.
|
||||
def is_always_last_day_of_year?
|
||||
false
|
||||
end
|
||||
|
||||
# Determines if this AbsoluteDayOfYearTransitionRule is equal to another
|
||||
# instance.
|
||||
def ==(r)
|
||||
super(r) && r.kind_of?(AbsoluteDayOfYearTransitionRule)
|
||||
end
|
||||
alias eql? ==
|
||||
|
||||
protected
|
||||
|
||||
# Returns a TimeOrDateTime representing midnight local time on the day
|
||||
# specified by the rule for the given offset and year.
|
||||
def get_day(year)
|
||||
new_time_or_datetime(year).add_with_convert(seconds)
|
||||
end
|
||||
|
||||
# Returns an Array of parameters that will influence the output of hash.
|
||||
def hash_args
|
||||
[AbsoluteDayOfYearTransitionRule] + super
|
||||
end
|
||||
end
|
||||
|
||||
# Defines transitions that occur on the one-based nth Julian day of the year.
|
||||
#
|
||||
# Leap days are not counted. Day 1 is 1 January. Day 60 is always 1 March.
|
||||
# Day 365 is always 31 December.
|
||||
#
|
||||
# @private
|
||||
class JulianDayOfYearTransitionRule < DayOfYearTransitionRule #:nodoc:
|
||||
# The 60 days in seconds.
|
||||
LEAP = 60 * 86400
|
||||
|
||||
# The length of a non-leap year in seconds.
|
||||
YEAR = 365 * 86400
|
||||
|
||||
# Initializes a new JulianDayOfYearTransitionRule.
|
||||
def initialize(day, transition_at = 0)
|
||||
super(day, transition_at)
|
||||
raise ArgumentError, 'Invalid day' unless day >= 1 && day <= 365
|
||||
end
|
||||
|
||||
# Returns true if the day specified by this transition is the first in the
|
||||
# year (a day number of 1), otherwise false.
|
||||
def is_always_first_day_of_year?
|
||||
seconds == 86400
|
||||
end
|
||||
|
||||
# Returns true if the day specified by this transition is the last in the
|
||||
# year (a day number of 365), otherwise false.
|
||||
def is_always_last_day_of_year?
|
||||
seconds == YEAR
|
||||
end
|
||||
|
||||
# Determines if this JulianDayOfYearTransitionRule is equal to another
|
||||
# instance.
|
||||
def ==(r)
|
||||
super(r) && r.kind_of?(JulianDayOfYearTransitionRule)
|
||||
end
|
||||
alias eql? ==
|
||||
|
||||
protected
|
||||
|
||||
# Returns a TimeOrDateTime representing midnight local time on the day
|
||||
# specified by the rule for the given offset and year.
|
||||
def get_day(year)
|
||||
# Returns 1 March on non-leap years.
|
||||
leap = new_time_or_datetime(year, 2, 29)
|
||||
diff = seconds - LEAP
|
||||
diff += 86400 if diff >= 0 && leap.mday == 29
|
||||
leap.add_with_convert(diff)
|
||||
end
|
||||
|
||||
# Returns an Array of parameters that will influence the output of hash.
|
||||
def hash_args
|
||||
[JulianDayOfYearTransitionRule] + super
|
||||
end
|
||||
end
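# Editorial examples (not in the gem source) of how the two day-of-year rules
# behave once parse_rule has built them from "Jn" and plain "n" day fields:
#
#   JulianDayOfYearTransitionRule.new(60, 7200)  # always 1 March, 02:00 local
#   AbsoluteDayOfYearTransitionRule.new(59)      # 29 February on leap years,
#                                                # 1 March otherwise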
|
||||
|
||||
# A base class for rules that transition on a particular day of week of a
|
||||
# given week (subclasses specify which week of the month).
|
||||
#
|
||||
# @private
|
||||
class DayOfWeekTransitionRule < TransitionRule #:nodoc:
|
||||
# Initializes a new DayOfWeekTransitionRule.
|
||||
def initialize(month, day_of_week, transition_at)
|
||||
super(transition_at)
|
||||
raise ArgumentError, 'Invalid month' unless month.kind_of?(Integer) && month >= 1 && month <= 12
|
||||
raise ArgumentError, 'Invalid day_of_week' unless day_of_week.kind_of?(Integer) && day_of_week >= 0 && day_of_week <= 6
|
||||
@month = month
|
||||
@day_of_week = day_of_week
|
||||
end
|
||||
|
||||
# Returns false.
|
||||
def is_always_first_day_of_year?
|
||||
false
|
||||
end
|
||||
|
||||
# Returns false.
|
||||
def is_always_last_day_of_year?
|
||||
false
|
||||
end
|
||||
|
||||
# Determines if this DayOfWeekTransitionRule is equal to another instance.
|
||||
def ==(r)
|
||||
super(r) && r.kind_of?(DayOfWeekTransitionRule) && @month == r.month && @day_of_week == r.day_of_week
|
||||
end
|
||||
alias eql? ==
|
||||
|
||||
protected
|
||||
|
||||
# Returns the month of the year (1 to 12).
|
||||
attr_reader :month
|
||||
|
||||
# Returns the day of the week (0 to 6 for Sunday to Saturday).
|
||||
attr_reader :day_of_week
|
||||
|
||||
# Returns an Array of parameters that will influence the output of hash.
|
||||
def hash_args
|
||||
[@month, @day_of_week] + super
|
||||
end
|
||||
end
|
||||
|
||||
# A rule that transitions on the nth occurrence of a particular day of week
|
||||
# of a calendar month.
|
||||
#
|
||||
# @private
|
||||
class DayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc:
|
||||
# Initializes a new DayOfMonthTransitionRule.
|
||||
def initialize(month, week, day_of_week, transition_at = 0)
|
||||
super(month, day_of_week, transition_at)
|
||||
raise ArgumentError, 'Invalid week' unless week.kind_of?(Integer) && week >= 1 && week <= 4
|
||||
@offset_start = (week - 1) * 7 + 1
|
||||
end
|
||||
|
||||
# Determines if this DayOfMonthTransitionRule is equal to another instance.
|
||||
def ==(r)
|
||||
super(r) && r.kind_of?(DayOfMonthTransitionRule) && @offset_start == r.offset_start
|
||||
end
|
||||
alias eql? ==
|
||||
|
||||
protected
|
||||
|
||||
# Returns the day the week starts on for a month starting on a Sunday.
|
||||
attr_reader :offset_start
|
||||
|
||||
# Returns a TimeOrDateTime representing midnight local time on the day
|
||||
# specified by the rule for the given offset and year.
|
||||
def get_day(year)
|
||||
candidate = new_time_or_datetime(year, month, @offset_start)
|
||||
diff = day_of_week - candidate.wday
|
||||
|
||||
if diff < 0
|
||||
candidate.add_with_convert((7 + diff) * 86400)
|
||||
elsif diff > 0
|
||||
candidate.add_with_convert(diff * 86400)
|
||||
else
|
||||
candidate
|
||||
end
|
||||
end
|
||||
|
||||
# Returns an Array of parameters that will influence the output of hash.
|
||||
def hash_args
|
||||
[@offset_start] + super
|
||||
end
|
||||
end
|
||||
|
||||
# A rule that transitions on the last occurrence of a particular day of week
|
||||
# of a calendar month.
|
||||
#
|
||||
# @private
|
||||
class LastDayOfMonthTransitionRule < DayOfWeekTransitionRule #:nodoc:
|
||||
# Initializes a new LastDayOfMonthTransitionRule.
|
||||
def initialize(month, day_of_week, transition_at = 0)
|
||||
super(month, day_of_week, transition_at)
|
||||
end
|
||||
|
||||
# Determines if this LastDayOfMonthTransitionRule is equal to another
|
||||
# instance.
|
||||
def ==(r)
|
||||
super(r) && r.kind_of?(LastDayOfMonthTransitionRule)
|
||||
end
|
||||
alias eql? ==
|
||||
|
||||
protected
|
||||
|
||||
# Returns a TimeOrDateTime representing midnight local time on the day
|
||||
# specified by the rule for the given offset and year.
|
||||
def get_day(year)
|
||||
next_month = month + 1
|
||||
if next_month == 13
|
||||
year += 1
|
||||
next_month = 1
|
||||
end
|
||||
|
||||
candidate = new_time_or_datetime(year, next_month).add_with_convert(-86400)
|
||||
diff = candidate.wday - day_of_week
|
||||
|
||||
if diff < 0
|
||||
candidate - (diff + 7) * 86400
|
||||
elsif diff > 0
|
||||
candidate - diff * 86400
|
||||
else
|
||||
candidate
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@ -192,6 +192,7 @@ module TZInfo
|
||||
@zoneinfo_dir = File.expand_path(@zoneinfo_dir).freeze
|
||||
@timezone_index = load_timezone_index.freeze
|
||||
@country_index = load_country_index(iso3166_tab_path, zone_tab_path).freeze
|
||||
@posix_tz_parser = PosixTimeZoneParser.new
|
||||
end
|
||||
|
||||
# Returns a TimezoneInfo instance for a given identifier.
|
||||
@ -208,7 +209,7 @@ module TZInfo
|
||||
path.untaint
|
||||
|
||||
begin
|
||||
ZoneinfoTimezoneInfo.new(identifier, path)
|
||||
ZoneinfoTimezoneInfo.new(identifier, path, @posix_tz_parser)
|
||||
rescue InvalidZoneinfoFile => e
|
||||
raise InvalidTimezoneIdentifier, e.message
|
||||
end
|
||||
515
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/tzinfo-1.2.8/lib/tzinfo/zoneinfo_timezone_info.rb
vendored
Normal file
@ -0,0 +1,515 @@
|
||||
module TZInfo
|
||||
# Use send as a workaround for erroneous 'wrong number of arguments' errors
|
||||
# with JRuby 9.0.5.0 when calling methods with Java implementations. See #114.
|
||||
send(:using, RubyCoreSupport::UntaintExt) if RubyCoreSupport.const_defined?(:UntaintExt)
|
||||
|
||||
# An InvalidZoneinfoFile exception is raised if an attempt is made to load an
|
||||
# invalid zoneinfo file.
|
||||
class InvalidZoneinfoFile < StandardError
|
||||
end
|
||||
|
||||
# Represents a timezone defined by a compiled zoneinfo TZif (\0, 2 or 3) file.
|
||||
#
|
||||
# @private
|
||||
class ZoneinfoTimezoneInfo < TransitionDataTimezoneInfo #:nodoc:
|
||||
# The year to generate transitions up to.
|
||||
#
|
||||
# @private
|
||||
GENERATE_UP_TO = RubyCoreSupport.time_supports_64bit ? Time.now.utc.year + 100 : 2037
|
||||
|
||||
# Minimum supported timestamp (inclusive).
|
||||
#
|
||||
# Time.utc(1700, 1, 1).to_i
|
||||
MIN_TIMESTAMP = -8520336000
|
||||
|
||||
# Maximum supported timestamp (exclusive).
|
||||
#
|
||||
# Time.utc(2500, 1, 1).to_i
|
||||
MAX_TIMESTAMP = 16725225600
|
||||
|
||||
# Constructs the new ZoneinfoTimezoneInfo with an identifier, path
|
||||
# to the file and parser to use to parse the POSIX-like TZ string.
|
||||
def initialize(identifier, file_path, posix_tz_parser)
|
||||
super(identifier)
|
||||
|
||||
File.open(file_path, 'rb') do |file|
|
||||
parse(file, posix_tz_parser)
|
||||
end
|
||||
end
|
||||
|
||||
private
|
||||
# Unpack will return unsigned 32-bit integers. Translate to
|
||||
# signed 32-bit.
|
||||
def make_signed_int32(long)
|
||||
long >= 0x80000000 ? long - 0x100000000 : long
|
||||
end
|
||||
|
||||
# Unpack will return a 64-bit integer as two unsigned 32-bit integers
|
||||
# (most significant first). Translate to signed 64-bit.
|
||||
def make_signed_int64(high, low)
|
||||
unsigned = (high << 32) | low
|
||||
unsigned >= 0x8000000000000000 ? unsigned - 0x10000000000000000 : unsigned
|
||||
end
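# Worked examples (editorial, not in the gem source):
#   make_signed_int32(0xFFFFFFFE)             # => -2
#   make_signed_int64(0xFFFFFFFF, 0x7C558180) # => -2208988800
#                                             #    (1900-01-01 00:00:00 UTC)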
|
||||
|
||||
# Read bytes from file and check that the correct number of bytes could
|
||||
# be read. Raises InvalidZoneinfoFile if the number of bytes didn't match
|
||||
# the number requested.
|
||||
def check_read(file, bytes)
|
||||
result = file.read(bytes)
|
||||
|
||||
unless result && result.length == bytes
|
||||
raise InvalidZoneinfoFile, "Expected #{bytes} bytes reading '#{file.path}', but got #{result ? result.length : 0} bytes"
|
||||
end
|
||||
|
||||
result
|
||||
end
|
||||
|
||||
# Zoneinfo files don't include the offset from standard time (std_offset)
|
||||
# for DST periods. Derive the base offset (utc_offset) where DST is
|
||||
# observed from either the previous or next non-DST period.
|
||||
#
|
||||
# Returns the index of the offset to be used prior to the first
|
||||
# transition.
|
||||
def derive_offsets(transitions, offsets)
|
||||
# The first non-DST offset (if there is one) is the offset observed
|
||||
# before the first transition. Fallback to the first DST offset if there
|
||||
# are no non-DST offsets.
|
||||
first_non_dst_offset_index = offsets.index {|o| !o[:is_dst] }
|
||||
first_offset_index = first_non_dst_offset_index || 0
|
||||
return first_offset_index if transitions.empty?
|
||||
|
||||
# Determine the utc_offset of the next non-dst offset at each transition.
|
||||
utc_offset_from_next = nil
|
||||
|
||||
transitions.reverse_each do |transition|
|
||||
offset = offsets[transition[:offset]]
|
||||
if offset[:is_dst]
|
||||
transition[:utc_offset_from_next] = utc_offset_from_next if utc_offset_from_next
|
||||
else
|
||||
utc_offset_from_next = offset[:utc_total_offset]
|
||||
end
|
||||
end
|
||||
|
||||
utc_offset_from_previous = first_non_dst_offset_index ? offsets[first_non_dst_offset_index][:utc_total_offset] : nil
|
||||
defined_offsets = {}
|
||||
|
||||
transitions.each do |transition|
|
||||
offset_index = transition[:offset]
|
||||
offset = offsets[offset_index]
|
||||
utc_total_offset = offset[:utc_total_offset]
|
||||
|
||||
if offset[:is_dst]
|
||||
utc_offset_from_next = transition[:utc_offset_from_next]
|
||||
|
||||
difference_to_previous = (utc_total_offset - (utc_offset_from_previous || utc_total_offset)).abs
|
||||
difference_to_next = (utc_total_offset - (utc_offset_from_next || utc_total_offset)).abs
|
||||
|
||||
utc_offset = if difference_to_previous == 3600
|
||||
utc_offset_from_previous
|
||||
elsif difference_to_next == 3600
|
||||
utc_offset_from_next
|
||||
elsif difference_to_previous > 0 && difference_to_next > 0
|
||||
difference_to_previous < difference_to_next ? utc_offset_from_previous : utc_offset_from_next
|
||||
elsif difference_to_previous > 0
|
||||
utc_offset_from_previous
|
||||
elsif difference_to_next > 0
|
||||
utc_offset_from_next
|
||||
else
|
||||
# No difference, assume a 1 hour offset from standard time.
|
||||
utc_total_offset - 3600
|
||||
end
|
||||
|
||||
if !offset[:utc_offset]
|
||||
offset[:utc_offset] = utc_offset
|
||||
defined_offsets[offset] = offset_index
|
||||
elsif offset[:utc_offset] != utc_offset
|
||||
# An earlier transition has already derived a different
|
||||
# utc_offset. Define a new offset or reuse an existing identically
|
||||
# defined offset.
|
||||
new_offset = offset.dup
|
||||
new_offset[:utc_offset] = utc_offset
|
||||
|
||||
offset_index = defined_offsets[new_offset]
|
||||
|
||||
unless offset_index
|
||||
offsets << new_offset
|
||||
offset_index = offsets.length - 1
|
||||
defined_offsets[new_offset] = offset_index
|
||||
end
|
||||
|
||||
transition[:offset] = offset_index
|
||||
end
|
||||
else
|
||||
utc_offset_from_previous = utc_total_offset
|
||||
end
|
||||
end
|
||||
|
||||
first_offset_index
|
||||
end
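# Illustrative data shapes (editorial, not in the gem source): given
#   offsets = [{:utc_total_offset => 3600, :is_dst => false, :abbr => 'CET'},
#              {:utc_total_offset => 7200, :is_dst => true,  :abbr => 'CEST'}]
# and transitions that reference offset 1, derive_offsets fills in
#   offsets[1][:utc_offset] = 3600
# (the base offset taken from the neighbouring non-DST period) and returns 0
# as the index of the offset in force before the first transition.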
|
||||
|
||||
# Remove transitions before a minimum supported value. If there is not a
|
||||
# transition exactly on the minimum supported value move the latest from
|
||||
# before up to the minimum supported value.
|
||||
def remove_unsupported_negative_transitions(transitions, min_supported)
|
||||
result = transitions.drop_while {|t| t[:at] < min_supported }
|
||||
if result.empty? || (result[0][:at] > min_supported && result.length < transitions.length)
|
||||
last_before = transitions[-1 - result.length]
|
||||
last_before[:at] = min_supported
|
||||
[last_before] + result
|
||||
else
|
||||
result
|
||||
end
|
||||
end
|
||||
|
||||
# Determines if the offset from a transition matches the offset from a
|
||||
# rule. This is a looser match than TimezoneOffset#==, not requiring that
|
||||
# the utc_offset and std_offset both match (which have to be derived for
|
||||
# transitions, but are known for rules).
|
||||
def offset_matches_rule?(offset, rule_offset)
|
||||
offset[:utc_total_offset] == rule_offset.utc_total_offset &&
|
||||
offset[:is_dst] == rule_offset.dst? &&
|
||||
offset[:abbr] == rule_offset.abbreviation.to_s
|
||||
end
|
||||
|
||||
# Determines if the offset from a transition exactly matches the offset
|
||||
# from a rule.
|
||||
def offset_equals_rule?(offset, rule_offset)
|
||||
offset_matches_rule?(offset, rule_offset) &&
|
||||
(offset[:utc_offset] || (offset[:is_dst] ? offset[:utc_total_offset] - 3600 : offset[:utc_total_offset])) == rule_offset.utc_offset
|
||||
end
|
||||
|
||||
# Finds an offset hash that is an exact match to the rule offset specified.
|
||||
def find_existing_offset_index(offsets, rule_offset)
|
||||
offsets.find_index {|o| offset_equals_rule?(o, rule_offset) }
|
||||
end
|
||||
|
||||
# Gets an existing matching offset index or adds a new offset hash for a
|
||||
# rule offset.
|
||||
def get_rule_offset_index(offsets, offset)
|
||||
index = find_existing_offset_index(offsets, offset)
|
||||
unless index
|
||||
index = offsets.length
|
||||
offsets << {:utc_total_offset => offset.utc_total_offset, :utc_offset => offset.utc_offset, :is_dst => offset.dst?, :abbr => offset.abbreviation}
|
||||
end
|
||||
index
|
||||
end
|
||||
|
||||
# Gets a hash mapping rule offsets to indexes in offsets, creating new
|
||||
# offset hashes if required.
|
||||
def get_rule_offset_indexes(offsets, annual_rules)
|
||||
{
|
||||
annual_rules.std_offset => get_rule_offset_index(offsets, annual_rules.std_offset),
|
||||
annual_rules.dst_offset => get_rule_offset_index(offsets, annual_rules.dst_offset)
|
||||
}
|
||||
end
|
||||
|
||||
# Converts an array of rule transitions to hashes.
|
||||
def convert_transitions_to_hashes(offset_indexes, transitions)
|
||||
transitions.map {|t| {:at => t.at.to_i, :offset => offset_indexes[t.offset]} }
|
||||
end
|
||||
|
||||
# Apply the rules from the TZ string when there were no defined
|
||||
# transitions. Checks for a matching offset. Returns the rules-based
|
||||
# constant offset or generates transitions from 1970 until 100 years into
|
||||
# the future (at the time of loading zoneinfo_timezone_info.rb) or 2037 if
|
||||
# limited to 32-bit Times.
|
||||
def apply_rules_without_transitions(file, offsets, first_offset_index, rules)
|
||||
first_offset = offsets[first_offset_index]
|
||||
|
||||
if rules.kind_of?(TimezoneOffset)
|
||||
unless offset_matches_rule?(first_offset, rules)
|
||||
raise InvalidZoneinfoFile, "Constant offset POSIX-style TZ string does not match constant offset in file '#{file.path}'."
|
||||
end
|
||||
|
||||
first_offset[:utc_offset] = rules.utc_offset
|
||||
[]
|
||||
else
|
||||
transitions = 1970.upto(GENERATE_UP_TO).map {|y| rules.transitions(y) }.flatten
|
||||
first_transition = transitions[0]
|
||||
|
||||
if offset_matches_rule?(first_offset, first_transition.previous_offset)
|
||||
# Correct the first offset if it isn't an exact match.
|
||||
first_offset[:utc_offset] = first_transition.previous_offset.utc_offset
|
||||
else
|
||||
# Not transitioning from the designated first offset.
|
||||
if offset_matches_rule?(first_offset, first_transition.offset)
|
||||
# Correct the first offset if it isn't an exact match.
|
||||
first_offset[:utc_offset] = first_transition.offset.utc_offset
|
||||
|
||||
# Skip an unnecessary transition to the first offset.
|
||||
transitions.shift
|
||||
end
|
||||
|
||||
# If the first offset doesn't match either the offset or previous
|
||||
# offset, then it will be retained.
|
||||
end
|
||||
|
||||
offset_indexes = get_rule_offset_indexes(offsets, rules)
|
||||
convert_transitions_to_hashes(offset_indexes, transitions)
|
||||
end
|
||||
end
|
||||
|
||||
# Validates the rules offset against the offset of the last defined
|
||||
# transition. Replaces the transition with an equivalent using the rules
|
||||
# offset if the rules give a different definition for the base offset.
|
||||
def replace_last_transition_offset_if_valid_and_needed(file, transitions, offsets)
|
||||
last_transition = transitions.last
|
||||
last_offset = offsets[last_transition[:offset]]
|
||||
rule_offset = yield last_offset
|
||||
|
||||
unless offset_matches_rule?(last_offset, rule_offset)
|
||||
raise InvalidZoneinfoFile, "Offset from POSIX-style TZ string does not match final transition in file '#{file.path}'."
|
||||
end
|
||||
|
||||
# The utc_total_offset and abbreviation must always be the same. The
|
||||
# base utc_offset and std_offset might differ. In which case the rule
|
||||
# should be used as it will be more precise.
|
||||
last_offset[:utc_offset] = rule_offset.utc_offset
|
||||
last_transition
|
||||
end
|
||||
|
||||
# todo: port over validate_and_fix_last_defined_transition_offset
|
||||
# when fixing the previous offset will need to define a new one
|
||||
|
||||
# Validates the offset indicated to be observed by the rules before the
|
||||
# first generated transition against the offset of the last defined
|
||||
# transition.
|
||||
#
|
||||
# Fix the last defined transition if it differs only on the base/std offsets
|
||||
# (which are derived). Raise an error if the observed UTC offset or
|
||||
# abbreviations differ.
|
||||
def validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, first_rule_offset)
|
||||
offset_of_last_defined = offsets[last_defined[:offset]]
|
||||
|
||||
if offset_equals_rule?(offset_of_last_defined, first_rule_offset)
|
||||
last_defined
|
||||
else
|
||||
if offset_matches_rule?(offset_of_last_defined, first_rule_offset)
|
||||
# The same overall offset, but differing in the base or std
|
||||
# offset (which are derived). Correct by using the rule.
|
||||
|
||||
offset_index = get_rule_offset_index(offsets, first_rule_offset)
|
||||
{:at => last_defined[:at], :offset => offset_index}
|
||||
else
|
||||
raise InvalidZoneinfoFile, "The first offset indicated by the POSIX-style TZ string did not match the final defined offset in file '#{file.path}'."
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Apply the rules from the TZ string when there were defined transitions.
|
||||
# Checks for a matching offset with the last transition. Redefines the
|
||||
# last transition if required and if the rules don't specify a constant
|
||||
# offset, generates transitions until 100 years into the future (at the
|
||||
# time of loading zoneinfo_timezone_info.rb) or 2037 if limited to 32-bit
|
||||
# Times.
|
||||
def apply_rules_with_transitions(file, transitions, offsets, first_offset_index, rules)
|
||||
last_defined = transitions[-1]
|
||||
|
||||
if rules.kind_of?(TimezoneOffset)
|
||||
transitions[-1] = validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, rules)
|
||||
else
|
||||
previous_offset_index = transitions.length > 1 ? transitions[-2][:offset] : first_offset_index
|
||||
previous_offset = offsets[previous_offset_index]
|
||||
last_year = (Time.at(last_defined[:at]).utc + previous_offset[:utc_total_offset]).year
|
||||
|
||||
if last_year <= GENERATE_UP_TO
|
||||
generated = rules.transitions(last_year).find_all {|t| t.at > last_defined[:at] } +
|
||||
(last_year + 1).upto(GENERATE_UP_TO).map {|y| rules.transitions(y) }.flatten
|
||||
|
||||
unless generated.empty?
|
||||
transitions[-1] = validate_and_fix_last_defined_transition_offset(file, offsets, last_defined, generated[0].previous_offset)
|
||||
rule_offset_indexes = get_rule_offset_indexes(offsets, rules)
|
||||
transitions.concat(convert_transitions_to_hashes(rule_offset_indexes, generated))
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Defines an offset for the timezone based on the given index and offset
|
||||
# Hash.
|
||||
def define_offset(index, offset)
|
||||
utc_total_offset = offset[:utc_total_offset]
|
||||
utc_offset = offset[:utc_offset]
|
||||
|
||||
if utc_offset
|
||||
# DST offset with base utc_offset derived by derive_offsets.
|
||||
std_offset = utc_total_offset - utc_offset
|
||||
elsif offset[:is_dst]
|
||||
# DST offset unreferenced by a transition (offset in use before the
|
||||
# first transition). No derived base UTC offset, so assume 1 hour
|
||||
# DST.
|
||||
utc_offset = utc_total_offset - 3600
|
||||
std_offset = 3600
|
||||
else
|
||||
# Non-DST offset.
|
||||
utc_offset = utc_total_offset
|
||||
std_offset = 0
|
||||
end
|
||||
|
||||
offset index, utc_offset, std_offset, offset[:abbr].untaint.to_sym
|
||||
end
|
||||
|
||||
# Parses a zoneinfo file and initializes the DataTimezoneInfo structures.
|
||||
def parse(file, posix_tz_parser)
|
||||
magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt =
|
||||
check_read(file, 44).unpack('a4 a x15 NNNNNN')
|
||||
|
||||
if magic != 'TZif'
|
||||
raise InvalidZoneinfoFile, "The file '#{file.path}' does not start with the expected header."
|
||||
end
|
||||
|
||||
if version == '2' || version == '3'
|
||||
# Skip the first 32-bit section and read the header of the second
|
||||
# 64-bit section. The 64-bit section is always used even if the
|
||||
# runtime platform doesn't support 64-bit timestamps. In "slim" format
|
||||
# zoneinfo files the 32-bit section will be empty.
|
||||
file.seek(timecnt * 5 + typecnt * 6 + charcnt + leapcnt * 8 + ttisstdcnt + ttisutccnt, IO::SEEK_CUR)
|
||||
|
||||
prev_version = version
|
||||
|
||||
magic, version, ttisutccnt, ttisstdcnt, leapcnt, timecnt, typecnt, charcnt =
|
||||
check_read(file, 44).unpack('a4 a x15 NNNNNN')
|
||||
|
||||
unless magic == 'TZif' && (version == prev_version)
|
||||
raise InvalidZoneinfoFile, "The file '#{file.path}' contains an invalid 64-bit section header."
|
||||
end
|
||||
|
||||
using_64bit = true
|
||||
elsif version != '3' && version != '2' && version != "\0"
|
||||
raise InvalidZoneinfoFile, "The file '#{file.path}' contains a version of the zoneinfo format that is not currently supported."
|
||||
else
|
||||
using_64bit = false
|
||||
end
|
||||
|
||||
unless leapcnt == 0
|
||||
raise InvalidZoneinfoFile, "The zoneinfo file '#{file.path}' contains leap second data. TZInfo requires zoneinfo files that omit leap seconds."
|
||||
end
|
||||
|
||||
transitions = []
|
||||
|
||||
if using_64bit
|
||||
timecnt.times do |i|
|
||||
high, low = check_read(file, 8).unpack('NN'.freeze)
|
||||
transition_time = make_signed_int64(high, low)
|
||||
transitions << {:at => transition_time}
|
||||
end
|
||||
else
|
||||
timecnt.times do |i|
|
||||
transition_time = make_signed_int32(check_read(file, 4).unpack('N'.freeze)[0])
|
||||
transitions << {:at => transition_time}
|
||||
end
|
||||
end
|
||||
|
||||
timecnt.times do |i|
|
||||
localtime_type = check_read(file, 1).unpack('C'.freeze)[0]
|
||||
transitions[i][:offset] = localtime_type
|
||||
end
|
||||
|
||||
offsets = []
|
||||
|
||||
typecnt.times do |i|
|
||||
gmtoff, isdst, abbrind = check_read(file, 6).unpack('NCC'.freeze)
|
||||
gmtoff = make_signed_int32(gmtoff)
|
||||
isdst = isdst == 1
|
||||
offset = {:utc_total_offset => gmtoff, :is_dst => isdst, :abbr_index => abbrind}
|
||||
|
||||
unless isdst
|
||||
offset[:utc_offset] = gmtoff
|
||||
end
|
||||
|
||||
offsets << offset
|
||||
end
|
||||
|
||||
abbrev = check_read(file, charcnt)
|
||||
|
||||
if using_64bit
|
||||
# Skip to the POSIX-style TZ string.
|
||||
file.seek(ttisstdcnt + ttisutccnt, IO::SEEK_CUR) # + leapcnt * 8, but leapcnt is checked above and guaranteed to be 0.
|
||||
tz_string_start = check_read(file, 1)
|
||||
raise InvalidZoneinfoFile, "Expected newline starting POSIX-style TZ string in file '#{file.path}'." unless tz_string_start == "\n"
|
||||
tz_string = RubyCoreSupport.force_encoding(file.readline("\n"), 'UTF-8')
|
||||
raise InvalidZoneinfoFile, "Expected newline ending POSIX-style TZ string in file '#{file.path}'." unless tz_string.chomp!("\n")
|
||||
|
||||
begin
|
||||
rules = posix_tz_parser.parse(tz_string)
|
||||
rescue InvalidPosixTimeZone => e
|
||||
raise InvalidZoneinfoFile, "Failed to parse POSIX-style TZ string in file '#{file.path}': #{e}"
|
||||
end
|
||||
else
|
||||
rules = nil
|
||||
end
|
||||
|
||||
offsets.each do |o|
|
||||
abbrev_start = o[:abbr_index]
|
||||
raise InvalidZoneinfoFile, "Abbreviation index is out of range in file '#{file.path}'" unless abbrev_start < abbrev.length
|
||||
|
||||
abbrev_end = abbrev.index("\0", abbrev_start)
|
||||
raise InvalidZoneinfoFile, "Missing abbreviation null terminator in file '#{file.path}'" unless abbrev_end
|
||||
|
||||
o[:abbr] = RubyCoreSupport.force_encoding(abbrev[abbrev_start...abbrev_end], 'UTF-8')
|
||||
end
|
||||
|
||||
transitions.each do |t|
|
||||
if t[:offset] < 0 || t[:offset] >= offsets.length
|
||||
raise InvalidZoneinfoFile, "Invalid offset referenced by transition in file '#{file.path}'."
|
||||
end
|
||||
end
|
||||
|
||||
# Derive the offsets from standard time (std_offset).
|
||||
first_offset_index = derive_offsets(transitions, offsets)
|
||||
|
||||
# Filter out transitions that are not supported by Time on this
|
||||
# platform.
|
||||
unless transitions.empty?
|
||||
if !RubyCoreSupport.time_supports_negative
|
||||
transitions = remove_unsupported_negative_transitions(transitions, 0)
|
||||
elsif !RubyCoreSupport.time_supports_64bit
|
||||
transitions = remove_unsupported_negative_transitions(transitions, -2**31)
|
||||
else
|
||||
# Ignore transitions that occur outside of a defined window. The
|
||||
# transition index cannot handle a large range of transition times.
|
||||
#
|
||||
# This is primarily intended to ignore the far in the past
|
||||
# transition added in zic 2014c (at timestamp -2**63 in zic 2014c
|
||||
# and at the approximate time of the big bang from zic 2014d).
|
||||
#
|
||||
# Assumes MIN_TIMESTAMP is less than -2**31.
|
||||
transitions = remove_unsupported_negative_transitions(transitions, MIN_TIMESTAMP)
|
||||
end
|
||||
|
||||
if !RubyCoreSupport.time_supports_64bit
|
||||
i = transitions.find_index {|t| t[:at] >= 2**31 }
|
||||
had_later_transition = !!i
|
||||
transitions = transitions.first(i) if i
|
||||
else
|
||||
had_later_transition = false
|
||||
end
|
||||
end
|
||||
|
||||
if rules && !had_later_transition
|
||||
if transitions.empty?
|
||||
transitions = apply_rules_without_transitions(file, offsets, first_offset_index, rules)
|
||||
else
|
||||
apply_rules_with_transitions(file, transitions, offsets, first_offset_index, rules)
|
||||
end
|
||||
end
|
||||
|
||||
define_offset(first_offset_index, offsets[first_offset_index])
|
||||
|
||||
used_offset_indexes = transitions.map {|t| t[:offset] }.to_set
|
||||
|
||||
offsets.each_with_index do |o, i|
|
||||
define_offset(i, o) if i != first_offset_index && used_offset_indexes.include?(i)
|
||||
end
|
||||
|
||||
# Ignore transitions that occur outside of a defined window. The
|
||||
# transition index cannot handle a large range of transition times.
|
||||
transitions.each do |t|
|
||||
at = t[:at]
|
||||
break if at >= MAX_TIMESTAMP
|
||||
time = Time.at(at).utc
|
||||
transition time.year, time.mon, t[:offset], at
|
||||
end
|
||||
end
|
||||
end
|
||||
end
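# Editorial example, not part of the vendored gem: loading a single zone
# directly from a compiled zoneinfo file with the classes above. The path is an
# assumption; point it at the local tzdata installation.
require 'tzinfo'

info = TZInfo::ZoneinfoTimezoneInfo.new(
  'Europe/London',
  '/usr/share/zoneinfo/Europe/London',
  TZInfo::PosixTimeZoneParser.new
)
info.identifier       # => "Europe/London"
info.create_timezone  # => a DataTimezone backed by the parsed transitions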
|
||||
@ -14,24 +14,22 @@ module Zeitwerk
|
||||
# the file system, to the loader responsible for them.
|
||||
#
|
||||
# @private
|
||||
# @return [{String => Zeitwerk::Loader}]
|
||||
# @sig Hash[String, Zeitwerk::Loader]
|
||||
attr_reader :cpaths
|
||||
|
||||
# @private
|
||||
# @return [Mutex]
|
||||
# @sig Mutex
|
||||
attr_reader :mutex
|
||||
|
||||
# @private
|
||||
# @return [TracePoint]
|
||||
# @sig TracePoint
|
||||
attr_reader :tracer
|
||||
|
||||
# Asserts `cpath` corresponds to an explicit namespace for which `loader`
|
||||
# is responsible.
|
||||
#
|
||||
# @private
|
||||
# @param cpath [String]
|
||||
# @param loader [Zeitwerk::Loader]
|
||||
# @return [void]
|
||||
# @sig (String, Zeitwerk::Loader) -> void
|
||||
def register(cpath, loader)
|
||||
mutex.synchronize do
|
||||
cpaths[cpath] = loader
|
||||
@ -42,19 +40,22 @@ module Zeitwerk
|
||||
end
|
||||
|
||||
# @private
|
||||
# @param loader [Zeitwerk::Loader]
|
||||
# @return [void]
|
||||
# @sig (Zeitwerk::Loader) -> void
|
||||
def unregister(loader)
|
||||
cpaths.delete_if { |_cpath, l| l == loader }
|
||||
disable_tracer_if_unneeded
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# @sig () -> void
|
||||
def disable_tracer_if_unneeded
|
||||
mutex.synchronize do
|
||||
tracer.disable if cpaths.empty?
|
||||
end
|
||||
end
|
||||
|
||||
# @sig (TracePoint) -> void
|
||||
def tracepoint_class_callback(event)
|
||||
# If the class is a singleton class, we won't do anything with it so we
|
||||
# can bail out immediately. This is several orders of magnitude faster
|
||||
@ -2,16 +2,14 @@
|
||||
|
||||
module Zeitwerk
|
||||
class GemInflector < Inflector
|
||||
# @param root_file [String]
|
||||
# @sig (String) -> void
|
||||
def initialize(root_file)
|
||||
namespace = File.basename(root_file, ".rb")
|
||||
lib_dir = File.dirname(root_file)
|
||||
@version_file = File.join(lib_dir, namespace, "version.rb")
|
||||
end
|
||||
|
||||
# @param basename [String]
|
||||
# @param abspath [String]
|
||||
# @return [String]
|
||||
# @sig (String, String) -> String
|
||||
def camelize(basename, abspath)
|
||||
abspath == @version_file ? "VERSION" : super
|
||||
end
|
||||
@ -11,9 +11,7 @@ module Zeitwerk
|
||||
#
|
||||
# Takes into account hard-coded mappings configured with `inflect`.
|
||||
#
|
||||
# @param basename [String]
|
||||
# @param _abspath [String]
|
||||
# @return [String]
|
||||
# @sig (String, String) -> String
|
||||
def camelize(basename, _abspath)
|
||||
overrides[basename] || basename.split('_').each(&:capitalize!).join
|
||||
end
|
||||
@ -30,8 +28,7 @@ module Zeitwerk
|
||||
# inflector.camelize("mysql_adapter", abspath) # => "MySQLAdapter"
|
||||
# inflector.camelize("users_controller", abspath) # => "UsersController"
|
||||
#
|
||||
# @param inflections [{String => String}]
|
||||
# @return [void]
|
||||
# @sig (Hash[String, String]) -> void
|
||||
def inflect(inflections)
|
||||
overrides.merge!(inflections)
|
||||
end
|
||||
@ -41,7 +38,7 @@ module Zeitwerk
|
||||
# Hard-coded basename to constant name user maps that override the default
|
||||
# inflection logic.
|
||||
#
|
||||
# @return [{String => String}]
|
||||
# @sig () -> Hash[String, String]
|
||||
def overrides
|
||||
@overrides ||= {}
|
||||
end
|
||||
@ -19,8 +19,7 @@ module Kernel
|
||||
# already existing ancestor chains.
|
||||
alias_method :zeitwerk_original_require, :require
|
||||
|
||||
# @param path [String]
|
||||
# @return [Boolean]
|
||||
# @sig (String) -> true | false
|
||||
def require(path)
|
||||
if loader = Zeitwerk::Registry.loader_for(path)
|
||||
if path.end_with?(".rb")
|
||||
@ -9,13 +9,13 @@ module Zeitwerk
|
||||
include Callbacks
|
||||
include RealModName
|
||||
|
||||
# @return [String]
|
||||
# @sig String
|
||||
attr_reader :tag
|
||||
|
||||
# @return [#camelize]
|
||||
# @sig #camelize
|
||||
attr_accessor :inflector
|
||||
|
||||
# @return [#call, #debug, nil]
|
||||
# @sig #call | #debug | nil
|
||||
attr_accessor :logger
|
||||
|
||||
# Absolute paths of the root directories. Stored in a hash to preserve
|
||||
@ -30,20 +30,20 @@ module Zeitwerk
|
||||
# interface for it is `push_dir` and `dirs`.
|
||||
#
|
||||
# @private
|
||||
# @return [{String => true}]
|
||||
# @sig Hash[String, true]
|
||||
attr_reader :root_dirs
|
||||
|
||||
# Absolute paths of files or directories that have to be preloaded.
|
||||
#
|
||||
# @private
|
||||
# @return [<String>]
|
||||
# @sig Array[String]
|
||||
attr_reader :preloads
|
||||
|
||||
# Absolute paths of files, directories, or glob patterns to be totally
|
||||
# ignored.
|
||||
#
|
||||
# @private
|
||||
# @return [Set<String>]
|
||||
# @sig Set[String]
|
||||
attr_reader :ignored_glob_patterns
|
||||
|
||||
# The actual collection of absolute file and directory names at the time the
|
||||
@ -51,20 +51,20 @@ module Zeitwerk
|
||||
# reload.
|
||||
#
|
||||
# @private
|
||||
# @return [Set<String>]
|
||||
# @sig Set[String]
|
||||
attr_reader :ignored_paths
|
||||
|
||||
# Absolute paths of directories or glob patterns to be collapsed.
|
||||
#
|
||||
# @private
|
||||
# @return [Set<String>]
|
||||
# @sig Set[String]
|
||||
attr_reader :collapse_glob_patterns
|
||||
|
||||
# The actual collection of absolute directory names at the time the collapse
|
||||
# glob patterns were expanded. Computed on setup, and recomputed on reload.
|
||||
#
|
||||
# @private
|
||||
# @return [Set<String>]
|
||||
# @sig Set[String]
|
||||
attr_reader :collapse_dirs
|
||||
|
||||
# Maps real absolute paths for which an autoload has been set ---and not
|
||||
@ -76,7 +76,7 @@ module Zeitwerk
|
||||
# ...
|
||||
#
|
||||
# @private
|
||||
# @return [{String => (Module, Symbol)}]
|
||||
# @sig Hash[String, [Module, Symbol]]
|
||||
attr_reader :autoloads
|
||||
|
||||
# We keep track of autoloaded directories to remove them from the registry
|
||||
@ -86,7 +86,7 @@ module Zeitwerk
|
||||
# to concurrency (see why in Zeitwerk::Loader::Callbacks#on_dir_autoloaded).
|
||||
#
|
||||
# @private
|
||||
# @return [<String>]
|
||||
# @sig Array[String]
|
||||
attr_reader :autoloaded_dirs
|
||||
|
||||
# Stores metadata needed for unloading. Its entries look like this:
|
||||
@ -102,7 +102,7 @@ module Zeitwerk
|
||||
# or eager loaded. Otherwise, the collection remains empty.
|
||||
#
|
||||
# @private
|
||||
# @return [{String => (String, (Module, Symbol))}]
|
||||
# @sig Hash[String, [String, [Module, Symbol]]]
|
||||
attr_reader :to_unload
|
||||
|
||||
# Maps constant paths of namespaces to arrays of corresponding directories.
|
||||
@ -120,21 +120,21 @@ module Zeitwerk
|
||||
# up the corresponding autoloads.
|
||||
#
|
||||
# @private
|
||||
# @return [{String => <String>}]
|
||||
# @sig Hash[String, Array[String]]
|
||||
attr_reader :lazy_subdirs
|
||||
|
||||
# Absolute paths of files or directories not to be eager loaded.
|
||||
#
|
||||
# @private
|
||||
# @return [Set<String>]
|
||||
# @sig Set[String]
|
||||
attr_reader :eager_load_exclusions
|
||||
|
||||
# @private
|
||||
# @return [Mutex]
|
||||
# @sig Mutex
|
||||
attr_reader :mutex
|
||||
|
||||
# @private
|
||||
# @return [Mutex]
|
||||
# @sig Mutex
|
||||
attr_reader :mutex2
|
||||
|
||||
def initialize
|
||||
@ -170,7 +170,7 @@ module Zeitwerk
|
||||
# Sets a tag for the loader, useful for logging.
|
||||
#
|
||||
# @param tag [#to_s]
|
||||
# @return [void]
|
||||
# @sig (#to_s) -> void
|
||||
def tag=(tag)
|
||||
@tag = tag.to_s
|
||||
end
|
||||
@ -178,7 +178,7 @@ module Zeitwerk
|
||||
# Absolute paths of the root directories. This is a read-only collection,
|
||||
# please push here via `push_dir`.
|
||||
#
|
||||
# @return [<String>]
|
||||
# @sig () -> Array[String]
|
||||
def dirs
|
||||
root_dirs.keys.freeze
|
||||
end
|
||||
@ -189,10 +189,8 @@ module Zeitwerk
|
||||
# the same process already manages that directory or one of its ascendants
|
||||
# or descendants.
|
||||
#
|
||||
# @param path [<String, Pathname>]
|
||||
# @param namespace [Class, Module]
|
||||
# @raise [Zeitwerk::Error]
|
||||
# @return [void]
|
||||
# @sig (String | Pathname, Module) -> void
|
||||
def push_dir(path, namespace: Object)
|
||||
# Note that Class < Module.
|
||||
unless namespace.is_a?(Module)
|
||||
@ -212,7 +210,7 @@ module Zeitwerk
|
||||
# There is no way to undo this, either you want to reload or you don't.
|
||||
#
|
||||
# @raise [Zeitwerk::Error]
|
||||
# @return [void]
|
||||
# @sig () -> void
|
||||
def enable_reloading
|
||||
mutex.synchronize do
|
||||
break if @reloading_enabled
|
||||
@ -225,15 +223,14 @@ module Zeitwerk
|
||||
end
|
||||
end
|
||||
|
||||
# @return [Boolean]
# @sig () -> bool
def reloading_enabled?
@reloading_enabled
end

# Files or directories to be preloaded instead of lazy loaded.
#
# @param paths [<String, Pathname, <String, Pathname>>]
# @return [void]
# @sig (*(String | Pathname | Array[String | Pathname])) -> void
def preload(*paths)
mutex.synchronize do
expand_paths(paths).each do |abspath|
@ -245,8 +242,7 @@ module Zeitwerk

# Configure files, directories, or glob patterns to be totally ignored.
#
# @param paths [<String, Pathname, <String, Pathname>>]
# @return [void]
# @sig (*(String | Pathname | Array[String | Pathname])) -> void
def ignore(*glob_patterns)
glob_patterns = expand_paths(glob_patterns)
mutex.synchronize do
@ -257,8 +253,7 @@ module Zeitwerk

# Configure directories or glob patterns to be collapsed.
#
# @param paths [<String, Pathname, <String, Pathname>>]
# @return [void]
# @sig (*(String | Pathname | Array[String | Pathname])) -> void
def collapse(*glob_patterns)
glob_patterns = expand_paths(glob_patterns)
mutex.synchronize do
@ -269,7 +264,7 @@ module Zeitwerk

# Sets autoloads in the root namespace and preloads files, if any.
#
# @return [void]
# @sig () -> void
def setup
mutex.synchronize do
break if @setup
@ -291,7 +286,7 @@ module Zeitwerk
# unload them.
#
# @private
# @return [void]
# @sig () -> void
def unload
mutex.synchronize do
# We are going to keep track of the files that were required by our
@ -354,7 +349,7 @@ module Zeitwerk
# client code in the README of the project.
#
# @raise [Zeitwerk::Error]
# @return [void]
# @sig () -> void
def reload
if reloading_enabled?
unload
@ -371,7 +366,7 @@ module Zeitwerk
# are not eager loaded. You can opt-out specifically in specific files and
# directories with `do_not_eager_load`.
#
# @return [void]
# @sig () -> void
def eager_load
mutex.synchronize do
break if @eager_loaded
@ -414,8 +409,7 @@ module Zeitwerk
# Let eager load ignore the given files or directories. The constants
# defined in those files are still autoloadable.
#
# @param paths [<String, Pathname, <String, Pathname>>]
# @return [void]
# @sig (*(String | Pathname | Array[String | Pathname])) -> void
def do_not_eager_load(*paths)
mutex.synchronize { eager_load_exclusions.merge(expand_paths(paths)) }
end
@ -423,8 +417,7 @@ module Zeitwerk
# Says if the given constant path would be unloaded on reload. This
# predicate returns `false` if reloading is disabled.
#
# @param cpath [String]
# @return [Boolean]
# @sig (String) -> bool
def unloadable_cpath?(cpath)
to_unload.key?(cpath)
end
@ -432,21 +425,20 @@ module Zeitwerk
# Returns an array with the constant paths that would be unloaded on reload.
# This predicate returns an empty array if reloading is disabled.
#
# @return [<String>]
# @sig () -> Array[String]
def unloadable_cpaths
to_unload.keys.freeze
end

# Logs to `$stdout`, handy shortcut for debugging.
#
# @return [void]
# @sig () -> void
def log!
@logger = ->(msg) { puts msg }
end

# @private
# @param dir [String]
# @return [Boolean]
# @sig (String) -> bool
def manages?(dir)
dir = dir + "/"
ignored_paths.each do |ignored_path|
@ -463,11 +455,11 @@ module Zeitwerk
# --- Class methods ---------------------------------------------------------------------------

class << self
# @return [#call, #debug, nil]
# @sig #call | #debug | nil
attr_accessor :default_logger

# @private
# @return [Mutex]
# @sig Mutex
attr_accessor :mutex

# This is a shortcut for
@ -481,7 +473,7 @@ module Zeitwerk
# except that this method returns the same object in subsequent calls from
# the same file, in the unlikely case the gem wants to be able to reload.
#
# @return [Zeitwerk::Loader]
# @sig () -> Zeitwerk::Loader
def for_gem
called_from = caller_locations(1, 1).first.path
Registry.loader_for_gem(called_from)
@ -489,7 +481,7 @@ module Zeitwerk

# Broadcasts `eager_load` to all loaders.
#
# @return [void]
# @sig () -> void
def eager_load_all
Registry.loaders.each(&:eager_load)
end
@ -497,7 +489,7 @@ module Zeitwerk
# Returns an array with the absolute paths of the root directories of all
# registered loaders. This is a read-only collection.
#
# @return [<String>]
# @sig () -> Array[String]
def all_dirs
Registry.loaders.flat_map(&:dirs).freeze
end
@ -507,16 +499,14 @@ module Zeitwerk

private # -------------------------------------------------------------------------------------

# @return [<String>]
# @sig () -> Array[String]
def actual_root_dirs
root_dirs.reject do |root_dir, _namespace|
!dir?(root_dir) || ignored_paths.member?(root_dir)
end
end

# @param dir [String]
# @param parent [Module]
# @return [void]
# @sig (String, Module) -> void
def set_autoloads_in_dir(dir, parent)
ls(dir) do |basename, abspath|
begin
@ -559,10 +549,7 @@ module Zeitwerk
end
end

# @param parent [Module]
# @param cname [Symbol]
# @param subdir [String]
# @return [void]
# @sig (Module, Symbol, String) -> void
def autoload_subdir(parent, cname, subdir)
if autoload_path = autoload_for?(parent, cname)
cpath = cpath(parent, cname)
@ -582,10 +569,7 @@ module Zeitwerk
end
end

# @param parent [Module]
# @param cname [Symbol]
# @param file [String]
# @return [void]
# @sig (Module, Symbol, String) -> void
def autoload_file(parent, cname, file)
if autoload_path = autoload_for?(parent, cname)
# First autoload for a Ruby file wins, just ignore subsequent ones.
@ -606,11 +590,10 @@ module Zeitwerk
end
end

# @param dir [String] directory that would have autovivified a module
# @param file [String] the file where the namespace is explicitly defined
# @param parent [Module]
# @param cname [Symbol]
# @return [void]
# `dir` is the directory that would have autovivified a namespace. `file` is
# the file where we've found the namespace is explicitly defined.
#
# @sig (dir: String, file: String, parent: Module, cname: Symbol) -> void
def promote_namespace_from_implicit_to_explicit(dir:, file:, parent:, cname:)
autoloads.delete(dir)
Registry.unregister_autoload(dir)
@ -619,10 +602,7 @@ module Zeitwerk
register_explicit_namespace(cpath(parent, cname))
end

# @param parent [Module]
# @param cname [Symbol]
# @param abspath [String]
# @return [void]
# @sig (Module, Symbol, String) -> void
def set_autoload(parent, cname, abspath)
# $LOADED_FEATURES stores real paths since Ruby 2.4.4. We set and save the
# real path to be able to delete it from $LOADED_FEATURES on unload, and to
@ -649,9 +629,7 @@ module Zeitwerk
end
end

# @param parent [Module]
# @param cname [Symbol]
# @return [String, nil]
# @sig (Module, Symbol) -> String?
def autoload_for?(parent, cname)
strict_autoload_path(parent, cname) || Registry.inception?(cpath(parent, cname))
end
@ -672,9 +650,7 @@ module Zeitwerk
#
# We need a way to strictly check in parent ignoring ancestors.
#
# @param parent [Module]
# @param cname [Symbol]
# @return [String, nil]
# @sig (Module, Symbol) -> String?
if method(:autoload?).arity == 1
def strict_autoload_path(parent, cname)
parent.autoload?(cname) if cdef?(parent, cname)
@ -688,15 +664,14 @@ module Zeitwerk
# This method is called this way because I prefer `preload` to be the method
# name to configure preloads in the public interface.
#
# @return [void]
# @sig () -> void
def do_preload
preloads.each do |abspath|
do_preload_abspath(abspath)
end
end

# @param abspath [String]
# @return [void]
# @sig (String) -> void
def do_preload_abspath(abspath)
if ruby?(abspath)
do_preload_file(abspath)
@ -705,31 +680,25 @@ module Zeitwerk
end
end

# @param dir [String]
# @return [void]
# @sig (String) -> void
def do_preload_dir(dir)
ls(dir) do |_basename, abspath|
do_preload_abspath(abspath)
end
end

# @param file [String]
# @return [Boolean]
# @sig (String) -> bool
def do_preload_file(file)
log("preloading #{file}") if logger
require file
end

# @param parent [Module]
# @param cname [Symbol]
# @return [String]
# @sig (Module, Symbol) -> String
def cpath(parent, cname)
parent.equal?(Object) ? cname.to_s : "#{real_mod_name(parent)}::#{cname}"
end

# @param dir [String]
# @yieldparam path [String, String]
# @return [void]
# @sig (String) { (String, String) -> void } -> void
def ls(dir)
Dir.foreach(dir) do |basename|
next if basename.start_with?(".")
@ -743,57 +712,55 @@ module Zeitwerk
end
end

# @param path [String]
# @return [Boolean]
# @sig (String) -> bool
def ruby?(path)
path.end_with?(".rb")
end

# @param path [String]
# @return [Boolean]
# @sig (String) -> bool
def dir?(path)
File.directory?(path)
end

# @param paths [<String, Pathname, <String, Pathname>>]
# @return [<String>]
# @sig (String | Pathname | Array[String | Pathname]) -> Array[String]
def expand_paths(paths)
paths.flatten.map! { |path| File.expand_path(path) }
end

# @param glob_patterns [<String>]
# @return [<String>]
# @sig (Array[String]) -> Array[String]
def expand_glob_patterns(glob_patterns)
# Note that Dir.glob works with regular file names just fine. That is,
# glob patterns technically need no wildcards.
glob_patterns.flat_map { |glob_pattern| Dir.glob(glob_pattern) }
end

# @return [void]
# @sig () -> void
def recompute_ignored_paths
ignored_paths.replace(expand_glob_patterns(ignored_glob_patterns))
end

# @return [void]
# @sig () -> void
def recompute_collapse_dirs
collapse_dirs.replace(expand_glob_patterns(collapse_glob_patterns))
end

# @param message [String]
# @return [void]
# @sig (String) -> void
def log(message)
method_name = logger.respond_to?(:debug) ? :debug : :call
logger.send(method_name, "Zeitwerk@#{tag}: #{message}")
end

# @sig (Module, Symbol) -> bool
def cdef?(parent, cname)
parent.const_defined?(cname, false)
end

# @sig (String) -> void
def register_explicit_namespace(cpath)
ExplicitNamespace.register(cpath, self)
end

# @sig (String) -> void
def raise_if_conflicting_directory(dir)
self.class.mutex.synchronize do
Registry.loaders.each do |loader|
@ -808,19 +775,15 @@ module Zeitwerk
end
end

# @param parent [Module]
# @param cname [Symbol]
# @return [void]
# @sig (Module, Symbol) -> void
def unload_autoload(parent, cname)
parent.send(:remove_const, cname)
parent.__send__(:remove_const, cname)
log("autoload for #{cpath(parent, cname)} removed") if logger
end

# @param parent [Module]
# @param cname [Symbol]
# @return [void]
# @sig (Module, Symbol) -> void
def unload_cref(parent, cname)
parent.send(:remove_const, cname)
parent.__send__(:remove_const, cname)
log("#{cpath(parent, cname)} unloaded") if logger
end
end
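For orientation only, and not part of this commit: a minimal sketch of how the Zeitwerk::Loader public API annotated above is typically wired up in a gem. The MyGem name, the lib/ layout, and the ignored version.rb path are assumptions for illustration.

# lib/my_gem.rb -- hypothetical entry point using the conventional lib/ layout
require "zeitwerk"

loader = Zeitwerk::Loader.for_gem              # returns the same object on repeated calls from this file
loader.ignore("#{__dir__}/my_gem/version.rb")  # hypothetical path; never autoload this file
loader.setup                                   # sets autoloads in the root namespace

module MyGem
end

loader.eager_load if ENV["MY_GEM_EAGER_LOAD"]  # optional; Zeitwerk::Loader.eager_load_all broadcasts to every registered loader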
@ -4,8 +4,7 @@ module Zeitwerk::Loader::Callbacks
# Invoked from our decorated Kernel#require when a managed file is autoloaded.
#
# @private
# @param file [String]
# @return [void]
# @sig (String) -> void
def on_file_autoloaded(file)
cref = autoloads.delete(file)
to_unload[cpath(*cref)] = [file, cref] if reloading_enabled?
@ -22,8 +21,7 @@ module Zeitwerk::Loader::Callbacks
# autoloaded.
#
# @private
# @param dir [String]
# @return [void]
# @sig (String) -> void
def on_dir_autoloaded(dir)
# Module#autoload does not serialize concurrent requires, and we handle
# directories ourselves, so the callback needs to account for concurrency.
@ -59,8 +57,7 @@ module Zeitwerk::Loader::Callbacks
# subdirectories, we descend into them now.
#
# @private
# @param namespace [Module]
# @return [void]
# @sig (Module) -> void
def on_namespace_loaded(namespace)
if subdirs = lazy_subdirs.delete(real_mod_name(namespace))
subdirs.each do |subdir|
@ -7,8 +7,7 @@ module Zeitwerk::RealModName
#
# The name method can be overridden, hence the indirection in this method.
#
# @param mod [Class, Module]
# @return [String, nil]
# @sig (Module) -> String?
if UnboundMethod.method_defined?(:bind_call)
def real_mod_name(mod)
UNBOUND_METHOD_MODULE_NAME.bind_call(mod)
@ -7,14 +7,14 @@ module Zeitwerk
# them from being garbage collected.
#
# @private
# @return [<Zeitwerk::Loader>]
# @sig Array[Zeitwerk::Loader]
attr_reader :loaders

# Registers loaders created with `for_gem` to make the method idempotent
# in case of reload.
#
# @private
# @return [{String => Zeitwerk::Loader}]
# @sig Hash[String, Zeitwerk::Loader]
attr_reader :loaders_managing_gems

# Maps real paths to the loaders responsible for them.
@ -23,7 +23,7 @@ module Zeitwerk
# invoke callbacks and autovivify modules.
#
# @private
# @return [{String => Zeitwerk::Loader}]
# @sig Hash[String, Zeitwerk::Loader]
attr_reader :autoloads

# This hash table addresses an edge case in which an autoload is ignored.
@ -62,14 +62,13 @@ module Zeitwerk
# end
#
# @private
# @return [{String => (String, Zeitwerk::Loader)}]
# @sig Hash[String, [String, Zeitwerk::Loader]]
attr_reader :inceptions

# Registers a loader.
#
# @private
# @param loader [Zeitwerk::Loader]
# @return [void]
# @sig (Zeitwerk::Loader) -> void
def register_loader(loader)
loaders << loader
end
@ -78,8 +77,7 @@ module Zeitwerk
# file. That is how Zeitwerk::Loader.for_gem is idempotent.
#
# @private
# @param root_file [String]
# @return [Zeitwerk::Loader]
# @sig (String) -> Zeitwerk::Loader
def loader_for_gem(root_file)
loaders_managing_gems[root_file] ||= begin
Loader.new.tap do |loader|
@ -91,32 +89,25 @@ module Zeitwerk
end

# @private
# @param loader [Zeitwerk::Loader]
# @param realpath [String]
# @return [void]
# @sig (Zeitwerk::Loader, String) -> String
def register_autoload(loader, realpath)
autoloads[realpath] = loader
end

# @private
# @param realpath [String]
# @return [void]
# @sig (String) -> void
def unregister_autoload(realpath)
autoloads.delete(realpath)
end

# @private
# @param cpath [String]
# @param realpath [String]
# @param loader [Zeitwerk::Loader]
# @return [void]
# @sig (String, String, Zeitwerk::Loader) -> void
def register_inception(cpath, realpath, loader)
inceptions[cpath] = [realpath, loader]
end

# @private
# @param cpath [String]
# @return [String, nil]
# @sig (String) -> String?
def inception?(cpath)
if pair = inceptions[cpath]
pair.first
@ -124,15 +115,13 @@ module Zeitwerk
end

# @private
# @param path [String]
# @return [Zeitwerk::Loader, nil]
# @sig (String) -> Zeitwerk::Loader?
def loader_for(path)
autoloads[path]
end

# @private
# @param loader [Zeitwerk::Loader]
# @return [void]
# @sig (Zeitwerk::Loader) -> void
def on_unload(loader)
autoloads.delete_if { |_path, object| object == loader }
inceptions.delete_if { |_cpath, (_path, object)| object == loader }
@ -1,5 +1,5 @@
# frozen_string_literal: true

module Zeitwerk
VERSION = "2.4.0"
VERSION = "2.4.1"
end
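As a usage note for the reload/unload paths touched above, a minimal, assumed sketch of an app-style loader with reloading enabled; push_dir and enable_reloading belong to the same public API, and the paths and reload trigger are hypothetical.

# Hypothetical application setup, sketch only
require "zeitwerk"

loader = Zeitwerk::Loader.new
loader.push_dir("#{__dir__}/app")  # hypothetical root directory
loader.enable_reloading            # must precede setup; makes reloading_enabled? return true
loader.setup

# Later, e.g. from a file watcher (hypothetical trigger).
# Loader#reload raises Zeitwerk::Error if reloading was not enabled.
loader.reload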