Skip to content

Commit

Permalink
Merge pull request #50090 from JuliaLang/backports-release-1.9
Browse files Browse the repository at this point in the history
Backports for 1.9.2
  • Loading branch information
KristofferC authored Jun 30, 2023
2 parents 13751df + 01a5a7c commit 3f2c5a3
Show file tree
Hide file tree
Showing 51 changed files with 739 additions and 241 deletions.
16 changes: 14 additions & 2 deletions base/Enums.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,14 @@ Base.cconvert(::Type{T}, x::Enum{T2}) where {T<:Integer,T2<:Integer} = T(x)::T
Base.write(io::IO, x::Enum{T}) where {T<:Integer} = write(io, T(x))
Base.read(io::IO, ::Type{T}) where {T<:Enum} = T(read(io, basetype(T)))

"""
_enum_hash(x::Enum, h::UInt)
Compute hash for an enum value `x`. This internal method will be specialized
for every enum type created through [`@enum`](@ref).
"""
_enum_hash(x::Enum, h::UInt) = invoke(hash, Tuple{Any, UInt}, x, h)
Base.hash(x::Enum, h::UInt) = _enum_hash(x, h)
Base.isless(x::T, y::T) where {T<:Enum} = isless(basetype(T)(x), basetype(T)(y))

Base.Symbol(x::Enum) = namemap(typeof(x))[Integer(x)]::Symbol
Expand Down Expand Up @@ -206,8 +214,12 @@ macro enum(T::Union{Symbol,Expr}, syms...)
Enums.namemap(::Type{$(esc(typename))}) = $(esc(namemap))
Base.typemin(x::Type{$(esc(typename))}) = $(esc(typename))($lo)
Base.typemax(x::Type{$(esc(typename))}) = $(esc(typename))($hi)
let enum_hash = hash($(esc(typename)))
Base.hash(x::$(esc(typename)), h::UInt) = hash(enum_hash, hash(Integer(x), h))
let type_hash = hash($(esc(typename)))
# Use internal `_enum_hash` to allow users to specialize
# `Base.hash` for their own enum types without overwriting the
# method we would define here. This avoids a warning for
# precompilation.
Enums._enum_hash(x::$(esc(typename)), h::UInt) = hash(type_hash, hash(Integer(x), h))
end
let insts = (Any[ $(esc(typename))(v) for v in $values ]...,)
Base.instances(::Type{$(esc(typename))}) = insts
Expand Down
4 changes: 1 addition & 3 deletions base/compiler/abstractinterpretation.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2441,11 +2441,9 @@ function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, fram
nothrow = false
if isa(rt, Const)
consistent = ALWAYS_TRUE
nothrow = true
if is_mutation_free_argtype(rt)
inaccessiblememonly = ALWAYS_TRUE
nothrow = true
else
nothrow = true
end
elseif isdefined_globalref(g)
nothrow = true
Expand Down
2 changes: 1 addition & 1 deletion base/compiler/ssair/inlining.jl
Original file line number Diff line number Diff line change
Expand Up @@ -1401,7 +1401,7 @@ function compute_inlining_cases(@nospecialize(info::CallInfo), flag::UInt8, sig:
fully_covered &= split_fully_covered
end

joint_effects = Effects(joint_effects; nothrow=fully_covered)
fully_covered || (joint_effects = Effects(joint_effects; nothrow=false))

if handled_all_cases && revisit_idx !== nothing
# we handled everything except one match with unmatched sparams,
Expand Down
2 changes: 1 addition & 1 deletion base/compiler/tfuncs.jl
Original file line number Diff line number Diff line change
Expand Up @@ -1488,7 +1488,7 @@ function apply_type_nothrow(@specialize(lattice::AbstractLattice), argtypes::Vec
end
else
istype || return false
if !(T <: u.var.ub)
if isa(u.var.ub, TypeVar) || !(T <: u.var.ub)
return false
end
if exact ? !(u.var.lb <: T) : !(u.var.lb === Bottom)
Expand Down
5 changes: 2 additions & 3 deletions base/compiler/typeinfer.jl
Original file line number Diff line number Diff line change
Expand Up @@ -367,10 +367,9 @@ end
function transform_result_for_cache(interp::AbstractInterpreter,
linfo::MethodInstance, valid_worlds::WorldRange, result::InferenceResult)
inferred_result = result.src
# If we decided not to optimize, drop the OptimizationState now.
# External interpreters can override as necessary to cache additional information
if inferred_result isa OptimizationState{typeof(interp)}
inferred_result = ir_to_codeinf!(inferred_result)
# TODO respect must_be_codeinf setting here?
result.src = inferred_result = ir_to_codeinf!(inferred_result)
end
if inferred_result isa CodeInfo
inferred_result.min_world = first(valid_worlds)
Expand Down
1 change: 1 addition & 0 deletions base/expr.jl
Original file line number Diff line number Diff line change
Expand Up @@ -925,6 +925,7 @@ end
@atomic order ex
Mark `var` or `ex` as being performed atomically, if `ex` is a supported expression.
If no `order` is specified it defaults to :sequentially_consistent.
@atomic a.b.x = new
@atomic a.b.x += addend
Expand Down
15 changes: 6 additions & 9 deletions base/gmp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -808,8 +808,8 @@ Base.deepcopy_internal(x::BigInt, stackdict::IdDict) = get!(() -> MPZ.set(x), st

## streamlined hashing for BigInt, by avoiding allocation from shifts ##

if Limb === UInt
# this condition is true most (all?) of the time, and in this case we can define
if Limb === UInt64 === UInt
# On 64 bit systems we can define
# an optimized version for BigInt of hash_integer (used e.g. for Rational{BigInt}),
# and of hash

Expand All @@ -819,7 +819,7 @@ if Limb === UInt
GC.@preserve n begin
s = n.size
s == 0 && return hash_integer(0, h)
p = convert(Ptr{UInt}, n.d)
p = convert(Ptr{UInt64}, n.d)
b = unsafe_load(p)
h ⊻= hash_uint(ifelse(s < 0, -b, b) ⊻ h)
for k = 2:abs(s)
Expand All @@ -829,14 +829,11 @@ if Limb === UInt
end
end

_divLimb(n) = UInt === UInt64 ? n >>> 6 : n >>> 5
_modLimb(n) = UInt === UInt64 ? n & 63 : n & 31

function hash(x::BigInt, h::UInt)
GC.@preserve x begin
sz = x.size
sz == 0 && return hash(0, h)
ptr = Ptr{UInt}(x.d)
ptr = Ptr{UInt64}(x.d)
if sz == 1
return hash(unsafe_load(ptr), h)
elseif sz == -1
Expand All @@ -845,8 +842,8 @@ if Limb === UInt
end
pow = trailing_zeros(x)
nd = Base.ndigits0z(x, 2)
idx = _divLimb(pow) + 1
shift = _modLimb(pow) % UInt
idx = (pow >>> 6) + 1
shift = (pow & 63) % UInt
upshift = BITS_PER_LIMB - shift
asz = abs(sz)
if shift == 0
Expand Down
35 changes: 32 additions & 3 deletions base/initdefs.jl
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,7 @@ const atexit_hooks = Callable[
() -> Filesystem.temp_cleanup_purge(force=true)
]
const _atexit_hooks_lock = ReentrantLock()
global _atexit_hooks_finished::Bool = false

"""
atexit(f)
Expand All @@ -374,12 +375,40 @@ exit code `n` (instead of the original exit code). If more than one exit hook
calls `exit(n)`, then Julia will exit with the exit code corresponding to the
last called exit hook that calls `exit(n)`. (Because exit hooks are called in
LIFO order, "last called" is equivalent to "first registered".)
Note: Once all exit hooks have been called, no more exit hooks can be registered,
and any call to `atexit(f)` after all hooks have completed will throw an exception.
This situation may occur if you are registering exit hooks from background Tasks that
may still be executing concurrently during shutdown.
"""
atexit(f::Function) = Base.@lock _atexit_hooks_lock (pushfirst!(atexit_hooks, f); nothing)
function atexit(f::Function)
Base.@lock _atexit_hooks_lock begin
_atexit_hooks_finished && error("cannot register new atexit hook; already exiting.")
pushfirst!(atexit_hooks, f)
return nothing
end
end

function _atexit(exitcode::Cint)
while !isempty(atexit_hooks)
f = popfirst!(atexit_hooks)
# Don't hold the lock around the iteration, just in case any other thread executing in
# parallel tries to register a new atexit hook while this is running. We don't want to
# block that thread from proceeding, and we can allow it to register its hook which we
# will immediately run here.
while true
local f
Base.@lock _atexit_hooks_lock begin
# If this is the last iteration, atomically disable atexit hooks to prevent
# someone from registering a hook that will never be run.
# (We do this inside the loop, so that it is atomic: no one can have registered
# a hook that never gets run, and we run all the hooks we know about until
# the vector is empty.)
if isempty(atexit_hooks)
global _atexit_hooks_finished = true
break
end

f = popfirst!(atexit_hooks)
end
try
if hasmethod(f, (Cint,))
f(exitcode)
Expand Down
6 changes: 5 additions & 1 deletion base/loading.jl
Original file line number Diff line number Diff line change
Expand Up @@ -1810,7 +1810,7 @@ function _require(pkg::PkgId, env=nothing)
else
@warn "The call to compilecache failed to create a usable precompiled cache file for $pkg" exception=m
end
# fall-through to loading the file locally
# fall-through to loading the file locally if not incremental
else
cachefile, ocachefile = cachefile::Tuple{String, Union{Nothing, String}}
m = _tryrequire_from_serialized(pkg, cachefile, ocachefile)
Expand All @@ -1820,6 +1820,10 @@ function _require(pkg::PkgId, env=nothing)
return m
end
end
if JLOptions().incremental != 0
# during incremental precompilation, this should be fail-fast
throw(PrecompilableError())
end
end
end

Expand Down
1 change: 1 addition & 0 deletions base/options.jl
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ struct JLOptions
rr_detach::Int8
strip_metadata::Int8
strip_ir::Int8
permalloc_pkgimg::Int8
heap_size_hint::UInt64
end

Expand Down
42 changes: 21 additions & 21 deletions base/sort.jl
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ export # not exported by Base
SMALL_ALGORITHM,
SMALL_THRESHOLD

abstract type Algorithm end

## functions requiring only ordering ##

Expand Down Expand Up @@ -414,19 +415,18 @@ macro getkw(syms...)
Expr(:block, (:($(esc(:((kw, $sym) = $getter(v, o, kw))))) for (sym, getter) in zip(syms, getters))...)
end

for (sym, deps, exp, type) in [
(:lo, (), :(firstindex(v)), Integer),
(:hi, (), :(lastindex(v)), Integer),
(:mn, (), :(throw(ArgumentError("mn is needed but has not been computed"))), :(eltype(v))),
(:mx, (), :(throw(ArgumentError("mx is needed but has not been computed"))), :(eltype(v))),
(:scratch, (), nothing, :(Union{Nothing, Vector})), # could have different eltype
(:allow_legacy_dispatch, (), true, Bool)]
for (sym, exp, type) in [
(:lo, :(firstindex(v)), Integer),
(:hi, :(lastindex(v)), Integer),
(:mn, :(throw(ArgumentError("mn is needed but has not been computed"))), :(eltype(v))),
(:mx, :(throw(ArgumentError("mx is needed but has not been computed"))), :(eltype(v))),
(:scratch, nothing, :(Union{Nothing, Vector})), # could have different eltype
(:legacy_dispatch_entry, nothing, Union{Nothing, Algorithm})]
usym = Symbol(:_, sym)
@eval function $usym(v, o, kw)
# using missing instead of nothing because scratch could === nothing.
res = get(kw, $(Expr(:quote, sym)), missing)
res !== missing && return kw, res::$type
@getkw $(deps...)
$sym = $exp
(;kw..., $sym), $sym::$type
end
Expand Down Expand Up @@ -484,8 +484,6 @@ internal or recursive calls.
"""
function _sort! end

abstract type Algorithm end


"""
MissingOptimization(next) <: Algorithm
Expand All @@ -509,12 +507,12 @@ struct WithoutMissingVector{T, U} <: AbstractVector{T}
new{nonmissingtype(eltype(data)), typeof(data)}(data)
end
end
Base.@propagate_inbounds function Base.getindex(v::WithoutMissingVector, i)
Base.@propagate_inbounds function Base.getindex(v::WithoutMissingVector, i::Integer)
out = v.data[i]
@assert !(out isa Missing)
out::eltype(v)
end
Base.@propagate_inbounds function Base.setindex!(v::WithoutMissingVector, x, i)
Base.@propagate_inbounds function Base.setindex!(v::WithoutMissingVector, x, i::Integer)
v.data[i] = x
v
end
Expand Down Expand Up @@ -575,8 +573,10 @@ function _sort!(v::AbstractVector, a::MissingOptimization, o::Ordering, kw)
# we can assume v is equal to eachindex(o.data) which allows a copying partition
# without allocations.
lo_i, hi_i = lo, hi
for (i,x) in zip(eachindex(o.data), o.data)
if ismissing(x) == (o.order == Reverse) # should i go at the beginning?
cv = eachindex(o.data) # equal to copy(v)
for i in lo:hi
x = o.data[cv[i]]
if ismissing(x) == (o.order == Reverse) # should x go at the beginning/end?
v[lo_i] = i
lo_i += 1
else
Expand Down Expand Up @@ -2120,25 +2120,25 @@ end
# Support 3-, 5-, and 6-argument versions of sort! for calling into the internals in the old way
sort!(v::AbstractVector, a::Algorithm, o::Ordering) = sort!(v, firstindex(v), lastindex(v), a, o)
function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::Algorithm, o::Ordering)
_sort!(v, a, o, (; lo, hi, allow_legacy_dispatch=false))
_sort!(v, a, o, (; lo, hi, legacy_dispatch_entry=a))
v
end
sort!(v::AbstractVector, lo::Integer, hi::Integer, a::Algorithm, o::Ordering, _) = sort!(v, lo, hi, a, o)
function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::Algorithm, o::Ordering, scratch::Vector)
_sort!(v, a, o, (; lo, hi, scratch, allow_legacy_dispatch=false))
_sort!(v, a, o, (; lo, hi, scratch, legacy_dispatch_entry=a))
v
end

# Support dispatch on custom algorithms in the old way
# sort!(::AbstractVector, ::Integer, ::Integer, ::MyCustomAlgorithm, ::Ordering) = ...
function _sort!(v::AbstractVector, a::Algorithm, o::Ordering, kw)
@getkw lo hi scratch allow_legacy_dispatch
if allow_legacy_dispatch
@getkw lo hi scratch legacy_dispatch_entry
if legacy_dispatch_entry === a
# This error prevents infinite recursion for unknown algorithms
throw(ArgumentError("Base.Sort._sort!(::$(typeof(v)), ::$(typeof(a)), ::$(typeof(o)), ::Any) is not defined"))
else
sort!(v, lo, hi, a, o)
scratch
else
# This error prevents infinite recursion for unknown algorithms
throw(ArgumentError("Base.Sort._sort!(::$(typeof(v)), ::$(typeof(a)), ::$(typeof(o))) is not defined"))
end
end

Expand Down
Loading

0 comments on commit 3f2c5a3

Please sign in to comment.