diff --git a/base/Base.jl b/base/Base.jl
index 9ad0cdf7a6661..a3fe44a2add86 100644
--- a/base/Base.jl
+++ b/base/Base.jl
@@ -427,7 +427,7 @@ end_base_include = time_ns()
 const _sysimage_modules = PkgId[]
 in_sysimage(pkgid::PkgId) = pkgid in _sysimage_modules
 
-# Precompiles for Revise
+# Precompiles for Revise and other packages
 # TODO: move these to contrib/generate_precompile.jl
 # The problem is they don't work there
 for match = _methods(+, (Int, Int), -1, get_world_counter())
@@ -461,6 +461,23 @@ for match = _methods(+, (Int, Int), -1, get_world_counter())
 
     # Code loading uses this
     sortperm(mtime.(readdir(".")), rev=true)
 
+    # JLLWrappers uses these
+    Dict{UUID,Set{String}}()[UUID("692b3bcd-3c85-4b1f-b108-f13ce0eb3210")] = Set{String}()
+    get!(Set{String}, Dict{UUID,Set{String}}(), UUID("692b3bcd-3c85-4b1f-b108-f13ce0eb3210"))
+    eachindex(IndexLinear(), Expr[])
+    push!(Expr[], Expr(:return, false))
+    vcat(String[], String[])
+    k, v = (:hello => nothing)
+    precompile(indexed_iterate, (Pair{Symbol, Union{Nothing, String}}, Int))
+    precompile(indexed_iterate, (Pair{Symbol, Union{Nothing, String}}, Int, Int))
+    # Preferences uses these
+    precompile(get_preferences, (UUID,))
+    precompile(record_compiletime_preference, (UUID, String))
+    get(Dict{String,Any}(), "missing", nothing)
+    delete!(Dict{String,Any}(), "missing")
+    for (k, v) in Dict{String,Any}()
+        println(k)
+    end
     break   # only actually need to do this once
 end
diff --git a/base/binaryplatforms.jl b/base/binaryplatforms.jl
index 61e1af796999d..87c7f029f0563 100644
--- a/base/binaryplatforms.jl
+++ b/base/binaryplatforms.jl
@@ -40,10 +40,10 @@ struct Platform <: AbstractPlatform
     # The "compare strategy" allows selective overriding on how a tag is compared
     compare_strategies::Dict{String,Function}
 
-    function Platform(arch::String, os::String;
+    # Passing `tags` as a `Dict` avoids the need to infer different NamedTuple specializations
+    function Platform(arch::String, os::String, _tags::Dict{String};
                       validate_strict::Bool = false,
-                      compare_strategies::Dict{String,<:Function} = Dict{String,Function}(),
-                      kwargs...)
+                      compare_strategies::Dict{String,<:Function} = Dict{String,Function}())
         # A wee bit of normalization
         os = lowercase(os)
         arch = CPUID.normalize_arch(arch)
@@ -52,8 +52,9 @@ struct Platform <: AbstractPlatform
             "arch" => arch,
             "os" => os,
         )
-        for (tag, value) in kwargs
-            tag = lowercase(string(tag::Symbol))
+        for (tag, value) in _tags
+            value = value::Union{String,VersionNumber,Nothing}
+            tag = lowercase(tag)
             if tag ∈ ("arch", "os")
                 throw(ArgumentError("Cannot double-pass key $(tag)"))
             end
@@ -70,8 +71,8 @@ struct Platform <: AbstractPlatform
             if tag ∈ ("libgfortran_version", "libstdcxx_version", "os_version")
                 if isa(value, VersionNumber)
                     value = string(value)
-                elseif isa(value, AbstractString)
-                    v = tryparse(VersionNumber, String(value)::String)
+                elseif isa(value, String)
+                    v = tryparse(VersionNumber, value)
                     if isa(v, VersionNumber)
                         value = string(v)
                     end
@@ -110,6 +111,19 @@ struct Platform <: AbstractPlatform
     end
 end
 
+# Keyword interface (to avoid inference of specialized NamedTuple methods, use the Dict interface for `tags`)
+function Platform(arch::String, os::String;
+                  validate_strict::Bool = false,
+                  compare_strategies::Dict{String,<:Function} = Dict{String,Function}(),
+                  kwargs...)
+    tags = Dict{String,Any}(String(tag)::String=>tagvalue(value) for (tag, value) in kwargs)
+    return Platform(arch, os, tags; validate_strict, compare_strategies)
+end
+
+tagvalue(v::Union{String,VersionNumber,Nothing}) = v
+tagvalue(v::Symbol) = String(v)
+tagvalue(v::AbstractString) = convert(String, v)::String
+
 # Simple tag insertion that performs a little bit of validation
 function add_tag!(tags::Dict{String,String}, tag::String, value::String)
     # I know we said only alphanumeric and dots, but let's be generous so that we can expand
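Why the `Dict` detour pays off, sketched in Julia — a hedged illustration, assuming `Platform` and `tagvalue` are in scope as inside `Base.BinaryPlatforms`:

    # Every distinct keyword set used to compile its own NamedTuple-specialized kwcall:
    Platform("x86_64", "linux"; libc = "glibc")                           # specializes on (:libc,)
    Platform("x86_64", "linux"; libc = "glibc", cxxstring_abi = "cxx11")  # and on (:libc, :cxxstring_abi)
    # The Dict form funnels every call through the single inner constructor:
    Platform("x86_64", "linux", Dict{String,Any}("libc" => "glibc"))
    # tagvalue normalizes keyword values before they reach the Dict:
    tagvalue(:glibc)              # "glibc"  (Symbol -> String)
    tagvalue(SubString("cxx11"))  # "cxx11"  (AbstractString -> String)
    tagvalue(v"3.4")              # v"3.4"   (VersionNumber passes through; stringified later)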
@@ -699,21 +713,22 @@ function Base.parse(::Type{Platform}, triplet::AbstractString; validate_strict::
         end
 
         # Extract the information we're interested in:
+        tags = Dict{String,Any}()
         arch = get_field(m, arch_mapping)
         os = get_field(m, os_mapping)
-        libc = get_field(m, libc_mapping)
-        call_abi = get_field(m, call_abi_mapping)
-        libgfortran_version = get_field(m, libgfortran_version_mapping)
-        libstdcxx_version = get_field(m, libstdcxx_version_mapping)
-        cxxstring_abi = get_field(m, cxxstring_abi_mapping)
+        tags["libc"] = get_field(m, libc_mapping)
+        tags["call_abi"] = get_field(m, call_abi_mapping)
+        tags["libgfortran_version"] = get_field(m, libgfortran_version_mapping)
+        tags["libstdcxx_version"] = get_field(m, libstdcxx_version_mapping)
+        tags["cxxstring_abi"] = get_field(m, cxxstring_abi_mapping)
         function split_tags(tagstr)
             tag_fields = split(tagstr, "-"; keepempty=false)
             if isempty(tag_fields)
                 return Pair{String,String}[]
             end
-            return map(v -> Symbol(v[1]) => v[2], split.(tag_fields, "+"))
+            return map(v -> String(v[1]) => String(v[2]), split.(tag_fields, "+"))
         end
-        tags = split_tags(m["tags"])
+        merge!(tags, Dict(split_tags(m["tags"])))
 
         # Special parsing of os version number, if any exists
         function extract_os_version(os_name, pattern)
@@ -730,18 +745,9 @@ function Base.parse(::Type{Platform}, triplet::AbstractString; validate_strict::
         if os == "freebsd"
             os_version = extract_os_version("freebsd", r".*freebsd([\d.]+)")
         end
+        tags["os_version"] = os_version
 
-        return Platform(
-            arch, os;
-            validate_strict,
-            libc,
-            call_abi,
-            libgfortran_version,
-            cxxstring_abi,
-            libstdcxx_version,
-            os_version,
-            tags...,
-        )
+        return Platform(arch, os, tags; validate_strict)
     end
     throw(ArgumentError("Platform `$(triplet)` is not an officially supported platform"))
 end
@@ -1068,4 +1074,9 @@ function select_platform(download_info::Dict, platform::AbstractPlatform = HostP
     return download_info[p]
 end
 
+# precompiles to reduce latency (see https://github.com/JuliaLang/julia/pull/43990#issuecomment-1025692379)
+Dict{Platform,String}()[HostPlatform()] = ""
+Platform("x86_64", "linux", Dict{String,Any}(); validate_strict=true)
+Platform("x86_64", "linux", Dict{String,String}(); validate_strict=false) # called this way from Artifacts.unpack_platform
+
 end # module
diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl
index d600df1dbb0a1..4015b7c00bf0d 100644
--- a/base/compiler/typeinfer.jl
+++ b/base/compiler/typeinfer.jl
@@ -1,5 +1,9 @@
 # This file is a part of Julia. License is MIT: https://julialang.org/license
 
+# Tracking of newly-inferred MethodInstances during precompilation
+const track_newly_inferred = RefValue{Bool}(false)
+const newly_inferred = MethodInstance[]
+
 # build (and start inferring) the inference frame for the top-level MethodInstance
 function typeinf(interp::AbstractInterpreter, result::InferenceResult, cache::Symbol)
     frame = InferenceState(result, cache, interp)
@@ -389,6 +393,12 @@ function cache_result!(interp::AbstractInterpreter, result::InferenceResult)
     if !already_inferred
         inferred_result = transform_result_for_cache(interp, linfo, valid_worlds, result.src)
         code_cache(interp)[linfo] = CodeInstance(result, inferred_result, valid_worlds)
+        if track_newly_inferred[]
+            m = linfo.def
+            if isa(m, Method)
+                m.module != Core && push!(newly_inferred, linfo)
+            end
+        end
     end
     unlock_mi_inference(interp, linfo)
     nothing
diff --git a/base/loading.jl b/base/loading.jl
index 7dce4532c1571..1f3297df36448 100644
--- a/base/loading.jl
+++ b/base/loading.jl
@@ -1395,13 +1395,17 @@ function include_package_for_output(pkg::PkgId, input::String, depot_path::Vecto
         task_local_storage()[:SOURCE_PATH] = source
     end
 
+    Core.Compiler.track_newly_inferred.x = true
     try
         Base.include(Base.__toplevel__, input)
     catch ex
         precompilableerror(ex) || rethrow()
         @debug "Aborting `create_expr_cache'" exception=(ErrorException("Declaration of __precompile__(false) not allowed"), catch_backtrace())
         exit(125) # we define status = 125 means PrecompileableError
+    finally
+        Core.Compiler.track_newly_inferred.x = false
     end
+    ccall(:jl_set_newly_inferred, Cvoid, (Any,), Core.Compiler.newly_inferred)
 end
 
 const PRECOMPILE_TRACE_COMPILE = Ref{String}()
@@ -2033,12 +2037,12 @@ end
 Compile the given function `f` for the argument tuple (of types) `args`, but do not
 execute it.
 """
-function precompile(@nospecialize(f), args::Tuple)
+function precompile(@nospecialize(f), @nospecialize(args::Tuple))
     precompile(Tuple{Core.Typeof(f), args...})
 end
 
 const ENABLE_PRECOMPILE_WARNINGS = Ref(false)
-function precompile(argt::Type)
+function precompile(@nospecialize(argt::Type))
     ret = ccall(:jl_compile_hint, Int32, (Any,), argt) != 0
     if !ret && ENABLE_PRECOMPILE_WARNINGS[]
         @warn "Inactive precompile statement" maxlog=100 form=argt _module=nothing _file=nothing _line=0
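For orientation, the two `precompile` methods touched above are related by simple forwarding (standard `Base` behavior; the new `@nospecialize` annotations just stop the compiler from specializing `precompile` itself for every `f` and `args`):

    precompile(sin, (Float64,))              # builds the full signature type...
    precompile(Tuple{typeof(sin), Float64})  # ...so these two calls are equivalent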
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 1e5118559b50e..21fe5d1fee84e 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -7857,7 +7857,7 @@ jl_compile_result_t jl_emit_codeinst(
             // don't delete inlineable code, unless it is constant
             (codeinst->invoke == jl_fptr_const_return_addr || !jl_ir_flag_inlineable((jl_array_t*)codeinst->inferred)) &&
             // don't delete code when generating a precompile file
-            !imaging_mode) {
+            !(imaging_mode || jl_options.incremental)) {
             // if not inlineable, code won't be needed again
             codeinst->inferred = jl_nothing;
         }
diff --git a/src/dump.c b/src/dump.c
index f2c8629ca9c8b..f109622d57a80 100644
--- a/src/dump.c
+++ b/src/dump.c
@@ -92,6 +92,16 @@ static arraylist_t reinit_list;
 // This is not quite globally rooted, but we take care to only
 // ever assigned rooted values here.
 static jl_array_t *serializer_worklist JL_GLOBALLY_ROOTED;
+// external MethodInstances we want to serialize
+static htable_t external_mis;
+// Inference tracks newly-inferred MethodInstances during precompilation
+// and registers them by calling jl_set_newly_inferred
+static jl_array_t *newly_inferred JL_GLOBALLY_ROOTED;
+
+// New roots to add to Methods. These can't be added until after
+// recaching is complete, so we have to hold on to them separately
+// Stored as method => (worklist_key, roots)
+static htable_t queued_method_roots;
 
 // inverse of backedges graph (caller=>callees hash)
 htable_t edges_map;
@@ -140,6 +150,18 @@ jl_value_t *jl_deser_symbol(uint8_t tag)
     return deser_symbols[tag];
 }
 
+uint64_t jl_worklist_key(jl_array_t *worklist)
+{
+    assert(jl_is_array(worklist));
+    size_t len = jl_array_len(worklist);
+    if (len > 0) {
+        jl_module_t *topmod = (jl_module_t*)jl_array_ptr_ref(worklist, len-1);
+        assert(jl_is_module(topmod));
+        return topmod->build_id;
+    }
+    return 0;
+}
+
 // --- serialize ---
 
 #define jl_serialize_value(s, v) jl_serialize_value_((s), (jl_value_t*)(v), 0)
@@ -163,6 +185,11 @@ static int module_in_worklist(jl_module_t *mod) JL_NOTSAFEPOINT
     return 0;
 }
 
+static int method_instance_in_queue(jl_method_instance_t *mi)
+{
+    return ptrhash_get(&external_mis, mi) != HT_NOTFOUND;
+}
+
 // compute whether a type references something internal to worklist
 // and thus could not have existed before deserialize
 // and thus does not need delayed unique-ing
@@ -219,6 +246,75 @@ static int type_recursively_external(jl_datatype_t *dt) JL_NOTSAFEPOINT
     return 1;
 }
 
+// When we infer external method instances, ensure they link back to the
+// package. Otherwise they might be, e.g., for external macros
+static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited)
+{
+    void **bp = ptrhash_bp(visited, mi);
+    // HT_NOTFOUND: not yet analyzed
+    // HT_NOTFOUND + 1: doesn't link back
+    // HT_NOTFOUND + 2: does link back
+    if (*bp != HT_NOTFOUND)
+        return (char*)*bp - (char*)HT_NOTFOUND - 1;
+    *bp = (void*)((char*)HT_NOTFOUND + 1);  // preliminarily mark as "not found"
+    jl_module_t *mod = mi->def.module;
+    if (jl_is_method(mod))
+        mod = ((jl_method_t*)mod)->module;
+    assert(jl_is_module(mod));
+    if (mi->precompiled || module_in_worklist(mod)) {
+        *bp = (void*)((char*)HT_NOTFOUND + 2);  // found
+        return 1;
+    }
+    if (!mi->backedges) {
+        return 0;
+    }
+    size_t i, n = jl_array_len(mi->backedges);
+    for (i = 0; i < n; i++) {
+        jl_method_instance_t *be = (jl_method_instance_t*)jl_array_ptr_ref(mi->backedges, i);
+        if (has_backedge_to_worklist(be, visited)) {
+            *bp = (void*)((char*)HT_NOTFOUND + 2);  // found
+            return 1;
+        }
+    }
+    return 0;
+}
+
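The tri-state memoization above encodes unanalyzed / no-link / links-back as offsets from `HT_NOTFOUND`. A hedged Julia sketch of the same walk (`module_in_worklist` and `moduleof` are hypothetical stand-ins for the C helpers):

    function reaches_worklist(mi, visited = IdDict{Any,UInt8}())
        state = get(visited, mi, 0x00)            # 0x00: not yet analyzed
        state != 0x00 && return state == 0x02     # 0x01: no link back, 0x02: links back
        visited[mi] = 0x01                        # provisional "no": breaks backedge cycles
        if mi.precompiled || module_in_worklist(moduleof(mi))
            visited[mi] = 0x02
            return true
        end
        for be in (isdefined(mi, :backedges) ? mi.backedges : ())
            if reaches_worklist(be, visited)
                visited[mi] = 0x02
                return true
            end
        end
        return false
    end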
+// given the list of MethodInstances that were inferred during the
+// build, select those that are external and have at least one
+// relocatable CodeInstance
+static size_t queue_external_mis(jl_array_t *list)
+{
+    size_t i, n = 0;
+    htable_t visited;
+    htable_new(&visited, 0);
+    if (list) {
+        assert(jl_is_array(list));
+        size_t n0 = jl_array_len(list);
+        for (i = 0; i < n0; i++) {
+            jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
+            assert(jl_is_method_instance(mi));
+            if (jl_is_method(mi->def.value)) {
+                jl_method_t *m = mi->def.method;
+                if (!module_in_worklist(m->module)) {
+                    jl_code_instance_t *ci = mi->cache;
+                    int relocatable = 0;
+                    while (ci) {
+                        relocatable |= ci->relocatability;
+                        ci = ci->next;
+                    }
+                    if (relocatable && ptrhash_get(&external_mis, mi) == HT_NOTFOUND) {
+                        if (has_backedge_to_worklist(mi, &visited)) {
+                            ptrhash_put(&external_mis, mi, mi);
+                            n++;
+                        }
+                    }
+                }
+            }
+        }
+    }
+    htable_free(&visited);
+    return n;
+}
 
 static void jl_serialize_datatype(jl_serializer_state *s, jl_datatype_t *dt) JL_GC_DISABLED
 {
@@ -485,8 +581,12 @@ static int jl_serialize_generic(jl_serializer_state *s, jl_value_t *v) JL_GC_DIS
     return 0;
 }
 
-static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_t *codeinst, int skip_partial_opaque) JL_GC_DISABLED
+static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_t *codeinst, int skip_partial_opaque, int internal) JL_GC_DISABLED
 {
+    if (internal > 2) {
+        while (codeinst && !codeinst->relocatability)
+            codeinst = codeinst->next;
+    }
     if (jl_serialize_generic(s, (jl_value_t*)codeinst)) {
         return;
     }
@@ -507,7 +607,7 @@ static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_
     if (write_ret_type && codeinst->rettype_const &&
         jl_typeis(codeinst->rettype_const, jl_partial_opaque_type)) {
         if (skip_partial_opaque) {
-            jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque);
+            jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque, internal);
             return;
         }
         else {
@@ -534,12 +634,13 @@ static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_
         jl_serialize_value(s, jl_nothing);
     }
     write_uint8(s->s, codeinst->relocatability);
-    jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque);
+    jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque, internal);
 }
 
 enum METHOD_SERIALIZATION_MODE {
     METHOD_INTERNAL = 1,
     METHOD_EXTERNAL_MT = 2,
+    METHOD_HAS_NEW_ROOTS = 4,
 };
 
 static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_literal) JL_GC_DISABLED
@@ -665,9 +766,16 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
     else if (jl_is_method(v)) {
         write_uint8(s->s, TAG_METHOD);
         jl_method_t *m = (jl_method_t*)v;
-        int serialization_mode = 0;
+        uint64_t key = 0;
+        int serialization_mode = 0, nwithkey = 0;
         if (m->is_for_opaque_closure || module_in_worklist(m->module))
             serialization_mode |= METHOD_INTERNAL;
+        if (!(serialization_mode & METHOD_INTERNAL)) {
+            key = jl_worklist_key(serializer_worklist);
+            nwithkey = nroots_with_key(m, key);
+            if (nwithkey > 0)
+                serialization_mode |= METHOD_HAS_NEW_ROOTS;
+        }
         if (!(serialization_mode & METHOD_INTERNAL)) {
             // flag this in the backref table as special
             uintptr_t *bp = (uintptr_t*)ptrhash_bp(&backref_table, v);
@@ -693,8 +801,25 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
         else {
             jl_serialize_value(s, (jl_value_t*)m->external_mt);
         }
-        if (!(serialization_mode & METHOD_INTERNAL))
+        if (!(serialization_mode & METHOD_INTERNAL)) {
+            if (serialization_mode & METHOD_HAS_NEW_ROOTS) {
+                // Serialize the roots that belong to key
+                write_uint64(s->s, key);
+                write_int32(s->s, nwithkey);
+                rle_iter_state rootiter = rle_iter_init(0);
+                uint64_t *rletable = NULL;
+                size_t nblocks2 = 0, nroots = jl_array_len(m->roots);
+                if (m->root_blocks) {
+                    rletable = (uint64_t*)jl_array_data(m->root_blocks);
+                    nblocks2 = jl_array_len(m->root_blocks);
+                }
+                // this visits every item; if it becomes a bottleneck we could hop blocks
+                while (rle_iter_increment(&rootiter, nroots, rletable, nblocks2))
+                    if (rootiter.key == key)
+                        jl_serialize_value(s, jl_array_ptr_ref(m->roots, rootiter.i));
+            }
             return;
+        }
         jl_serialize_value(s, m->specializations);
         jl_serialize_value(s, jl_atomic_load_relaxed(&m->speckeyset));
         jl_serialize_value(s, (jl_value_t*)m->name);
@@ -731,6 +856,8 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
             internal = 1;
         else if (module_in_worklist(mi->def.method->module))
             internal = 2;
+        else if (ptrhash_get(&external_mis, (void*)mi) != HT_NOTFOUND)
+            internal = 3;
         write_uint8(s->s, internal);
         if (!internal) {
             // also flag this in the backref table as special
@@ -748,12 +875,12 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
         jl_array_t *backedges = mi->backedges;
         if (backedges) {
             // filter backedges to only contain pointers
-            // to items that we will actually store (internal == 2)
+            // to items that we will actually store (internal >= 2)
             size_t ins, i, l = jl_array_len(backedges);
             jl_method_instance_t **b_edges = (jl_method_instance_t**)jl_array_data(backedges);
             for (ins = i = 0; i < l; i++) {
                 jl_method_instance_t *backedge = b_edges[i];
-                if (module_in_worklist(backedge->def.method->module)) {
+                if (module_in_worklist(backedge->def.method->module) || method_instance_in_queue(backedge)) {
                     b_edges[ins++] = backedge;
                 }
             }
@@ -764,10 +891,10 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
         }
         jl_serialize_value(s, (jl_value_t*)backedges);
         jl_serialize_value(s, (jl_value_t*)NULL); //callbacks
-        jl_serialize_code_instance(s, mi->cache, 1);
+        jl_serialize_code_instance(s, mi->cache, 1, internal);
     }
     else if (jl_is_code_instance(v)) {
-        jl_serialize_code_instance(s, (jl_code_instance_t*)v, 0);
+        jl_serialize_code_instance(s, (jl_code_instance_t*)v, 0, 2);
     }
     else if (jl_typeis(v, jl_module_type)) {
         jl_serialize_module(s, (jl_module_t*)v);
@@ -936,6 +1063,27 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
         }
     }
 }
 
+// Used to serialize the external method instances queued in external_mis (from newly_inferred)
+static void serialize_htable_keys(jl_serializer_state *s, htable_t *ht, int nitems)
+{
+    write_int32(s->s, nitems);
+    void **table = ht->table;
+    size_t i, n = 0, sz = ht->size;
+    for (i = 0; i < sz; i += 2) {
+        if (table[i+1] != HT_NOTFOUND) {
+            jl_serialize_value(s, (jl_value_t*)table[i]);
+            n += 1;
+        }
+    }
+    assert(n == nitems);
+}
+
+// Create the forward-edge map (caller => callees)
+// the intent of these functions is to invert the backedges tree
+// for anything that points to a method not part of the worklist
+// or method instances not in the queue
+//
+// from MethodTables
 static void jl_collect_missing_backedges_to_mod(jl_methtable_t *mt)
 {
     jl_array_t *backedges = mt->backedges;
@@ -943,7 +1091,7 @@ static void jl_collect_missing_backedges_to_mod(jl_methtable_t *mt)
         size_t i, l = jl_array_len(backedges);
         for (i = 1; i < l; i += 2) {
             jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i);
-            jl_value_t *missing_callee = jl_array_ptr_ref(backedges, i - 1);
+            jl_value_t *missing_callee = jl_array_ptr_ref(backedges, i - 1);  // signature of abstract callee
             jl_array_t **edges = (jl_array_t**)ptrhash_bp(&edges_map, (void*)caller);
             if (*edges == HT_NOTFOUND)
                 *edges = jl_alloc_vec_any(0);
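What `edges_map` accumulates, in a hedged Julia sketch (an `IdDict` standing in for the C `htable_t`; `callees_with_backedges` is a hypothetical iterator):

    edges = IdDict{Any,Vector{Any}}()                         # caller => callees
    for callee in callees_with_backedges
        for caller in callee.backedges                        # recorded callee => caller links...
            push!(get!(() -> Any[], edges, caller), callee)   # ...inverted to caller => callees
        end
    end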
@@ -952,8 +1100,7 @@ static void jl_collect_missing_backedges_to_mod(jl_methtable_t *mt)
     }
 }
 
-// the intent of this function is to invert the backedges tree
-// for anything that points to a method not part of the worklist
+// from MethodInstances
 static void collect_backedges(jl_method_instance_t *callee) JL_GC_DISABLED
 {
     jl_array_t *backedges = callee->backedges;
@@ -970,6 +1117,11 @@ static void collect_backedges(jl_method_instance_t *callee) JL_GC_DISABLED
 }
 
+// For functions owned by modules not on the worklist, call this on each method.
+// - if the method is owned by a worklist module, add it to the list of things to be
+//   fully serialized
+// - otherwise (i.e., if it's an external method), check all of its specializations.
+//   Collect backedges from those that are not being fully serialized.
 static int jl_collect_methcache_from_mod(jl_typemap_entry_t *ml, void *closure) JL_GC_DISABLED
 {
     jl_array_t *s = (jl_array_t*)closure;
@@ -983,7 +1135,7 @@ static int jl_collect_methcache_from_mod(jl_typemap_entry_t *ml, void *closure)
         size_t i, l = jl_svec_len(specializations);
         for (i = 0; i < l; i++) {
             jl_method_instance_t *callee = (jl_method_instance_t*)jl_svecref(specializations, i);
-            if ((jl_value_t*)callee != jl_nothing)
+            if ((jl_value_t*)callee != jl_nothing && !method_instance_in_queue(callee))
                 collect_backedges(callee);
         }
     }
@@ -1074,7 +1226,7 @@ static void jl_collect_backedges( /* edges */ jl_array_t *s, /* ext_targets */ j
     for (i = 0; i < edges_map.size; i += 2) {
         jl_method_instance_t *caller = (jl_method_instance_t*)table[i];
         jl_array_t *callees = (jl_array_t*)table[i + 1];
-        if (callees != HT_NOTFOUND && module_in_worklist(caller->def.method->module)) {
+        if (callees != HT_NOTFOUND && (module_in_worklist(caller->def.method->module) || method_instance_in_queue(caller))) {
             size_t i, l = jl_array_len(callees);
             for (i = 0; i < l; i++) {
                 jl_value_t *c = jl_array_ptr_ref(callees, i);
@@ -1556,6 +1708,19 @@ static jl_value_t *jl_deserialize_value_method(jl_serializer_state *s, jl_value_
         assert(loc != NULL && loc != HT_NOTFOUND);
         arraylist_push(&flagref_list, loc);
         arraylist_push(&flagref_list, (void*)pos);
+        if (serialization_mode & METHOD_HAS_NEW_ROOTS) {
+            uint64_t key = read_uint64(s->s);
+            int i, nnew = read_int32(s->s);
+            jl_array_t *newroots = jl_alloc_vec_any(nnew);
+            jl_value_t **data = (jl_value_t**)jl_array_data(newroots);
+            for (i = 0; i < nnew; i++)
+                data[i] = jl_deserialize_value(s, &(data[i]));
+            // Storing the new roots in `m->roots` risks losing them due to recaching
+            // (which replaces pointers to `m` with ones to the "live" method).
+            // Put them in separate storage so we can find them later.
+            assert(ptrhash_get(&queued_method_roots, m) == HT_NOTFOUND);
+            ptrhash_put(&queued_method_roots, m, jl_svec2((void*)(uintptr_t)key, newroots));  // GC is disabled
+        }
         return (jl_value_t*)m;
     }
     m->specializations = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&m->specializations);
@@ -1619,6 +1784,10 @@ static jl_value_t *jl_deserialize_value_method_instance(jl_serializer_state *s,
     uintptr_t pos = backref_list.len;
     arraylist_push(&backref_list, mi);
     int internal = read_uint8(s->s);
+    if (internal == 1) {
+        mi->uninferred = jl_deserialize_value(s, &mi->uninferred);
+        jl_gc_wb(mi, mi->uninferred);
+    }
     mi->specTypes = (jl_value_t*)jl_deserialize_value(s, (jl_value_t**)&mi->specTypes);
     jl_gc_wb(mi, mi->specTypes);
     mi->def.value = jl_deserialize_value(s, &mi->def.value);
@@ -1631,10 +1800,6 @@ static jl_value_t *jl_deserialize_value_method_instance(jl_serializer_state *s,
         return (jl_value_t*)mi;
     }
 
-    if (internal == 1) {
-        mi->uninferred = jl_deserialize_value(s, &mi->uninferred);
-        jl_gc_wb(mi, mi->uninferred);
-    }
     mi->sparam_vals = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&mi->sparam_vals);
     jl_gc_wb(mi, mi->sparam_vals);
     mi->backedges = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&mi->backedges);
@@ -1676,6 +1841,7 @@ static jl_value_t *jl_deserialize_value_code_instance(jl_serializer_state *s, jl
     if ((flags >> 3) & 1)
         codeinst->precompile = 1;
     codeinst->relocatability = read_uint8(s->s);
+    assert(codeinst->relocatability <= 1);
     codeinst->next = (jl_code_instance_t*)jl_deserialize_value(s, (jl_value_t**)&codeinst->next);
     jl_gc_wb(codeinst, codeinst->next);
     if (validate) {
@@ -2023,15 +2189,117 @@ static void jl_insert_methods(jl_array_t *list)
     size_t i, l = jl_array_len(list);
     for (i = 0; i < l; i += 2) {
         jl_method_t *meth = (jl_method_t*)jl_array_ptr_ref(list, i);
+        assert(jl_is_method(meth));
         assert(!meth->is_for_opaque_closure);
         jl_tupletype_t *simpletype = (jl_tupletype_t*)jl_array_ptr_ref(list, i + 1);
-        assert(jl_is_method(meth));
         jl_methtable_t *mt = jl_method_get_table(meth);
         assert((jl_value_t*)mt != jl_nothing);
         jl_method_table_insert(mt, meth, simpletype);
     }
 }
 
+void remove_code_instance_from_validation(jl_code_instance_t *codeinst)
+{
+    ptrhash_remove(&new_code_instance_validate, codeinst);
+}
+
+static void jl_insert_method_instances(jl_array_t *list)
+{
+    size_t i, l = jl_array_len(list);
+    // Validate the MethodInstances
+    jl_array_t *valids = jl_alloc_array_1d(jl_array_uint8_type, l);
+    memset(jl_array_data(valids), 1, l);
+    size_t world = jl_atomic_load_acquire(&jl_world_counter);
+    for (i = 0; i < l; i++) {
+        jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
+        assert(jl_is_method_instance(mi));
+        if (jl_is_method(mi->def.method)) {
+            // Is this still the method we'd be calling?
+            jl_methtable_t *mt = jl_method_table_for(mi->specTypes);
+            jl_value_t *mworld = jl_methtable_lookup(mt, mi->specTypes, world);
+            if (jl_is_method(mworld) && mi->def.method != (jl_method_t*)mworld) {
+                jl_array_uint8_set(valids, i, 0);
+                invalidate_backedges(&remove_code_instance_from_validation, mi, world, "jl_insert_method_instance");
+                // The CodeInstances of this mi haven't yet been removed
+                jl_code_instance_t *codeinst = mi->cache;
+                while (codeinst) {
+                    remove_code_instance_from_validation(codeinst);
+                    codeinst = codeinst->next;
+                }
+                if (_jl_debug_method_invalidation) {
+                    jl_array_ptr_1d_push(_jl_debug_method_invalidation, mworld);
+                    jl_array_ptr_1d_push(_jl_debug_method_invalidation, jl_cstr_to_string("jl_method_table_insert"));  // GC disabled
+                }
+            }
+        }
+    }
+    // While it's tempting to just remove the invalidated MIs altogether,
+    // this hurts the ability of SnoopCompile to diagnose problems.
+    for (i = 0; i < l; i++) {
+        jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
+        jl_method_instance_t *milive = jl_specializations_get_or_insert(mi);
+        ptrhash_put(&uniquing_table, mi, milive);  // store the association for the 2nd pass
+    }
+    // We may need to fix up the backedges for the ones that didn't "go live"
+    for (i = 0; i < l; i++) {
+        jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
+        jl_method_instance_t *milive = (jl_method_instance_t*)ptrhash_get(&uniquing_table, mi);
+        if (milive != mi) {
+            // A previously-loaded module compiled this method, so the one we deserialized will be dropped.
+            // But make sure the backedges are copied over.
+            if (mi->backedges) {
+                if (!milive->backedges) {
+                    // Copy all the backedges (after looking up the live ones)
+                    size_t j, n = jl_array_len(mi->backedges);
+                    milive->backedges = jl_alloc_vec_any(n);
+                    jl_gc_wb(milive, milive->backedges);
+                    for (j = 0; j < n; j++) {
+                        jl_method_instance_t *be = (jl_method_instance_t*)jl_array_ptr_ref(mi->backedges, j);
+                        jl_method_instance_t *belive = (jl_method_instance_t*)ptrhash_get(&uniquing_table, be);
+                        if (belive == HT_NOTFOUND)
+                            belive = be;
+                        jl_array_ptr_set(milive->backedges, j, belive);
+                    }
+                } else {
+                    // Copy the missing backedges (this is an O(N^2) algorithm, but many methods have few MethodInstances)
+                    size_t j, k, n = jl_array_len(mi->backedges), nlive = jl_array_len(milive->backedges);
+                    for (j = 0; j < n; j++) {
+                        jl_method_instance_t *be = (jl_method_instance_t*)jl_array_ptr_ref(mi->backedges, j);
+                        jl_method_instance_t *belive = (jl_method_instance_t*)ptrhash_get(&uniquing_table, be);
+                        if (belive == HT_NOTFOUND)
+                            belive = be;
+                        int found = 0;
+                        for (k = 0; k < nlive; k++) {
+                            if (belive == (jl_method_instance_t*)jl_array_ptr_ref(milive->backedges, k)) {
+                                found = 1;
+                                break;
+                            }
+                        }
+                        if (!found)
+                            jl_array_ptr_1d_push(milive->backedges, (jl_value_t*)belive);
+                    }
+                }
+            }
+            // Additionally, if we have CodeInstance(s) and the running CodeInstance is world-limited, transfer it
+            if (mi->cache && jl_array_uint8_ref(valids, i)) {
+                if (!milive->cache || milive->cache->max_world < ~(size_t)0) {
+                    jl_code_instance_t *cilive = milive->cache, *ci;
+                    milive->cache = mi->cache;
+                    jl_gc_wb(milive, milive->cache);
+                    ci = mi->cache;
+                    ci->def = milive;
+                    while (ci->next) {
+                        ci = ci->next;
+                        ci->def = milive;
+                    }
+                    ci->next = cilive;
+                    jl_gc_wb(ci, ci->next);
+                }
+            }
+        }
+    }
+}
+
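A hedged Julia-flavored summary of the two passes above (`get_or_insert_specialization` stands in for `jl_specializations_get_or_insert`, the `IdDict` for the C `uniquing_table`):

    uniquing = IdDict{Any,Any}()
    for mi in mi_list                # pass 1: unique each deserialized mi against the live method
        uniquing[mi] = get_or_insert_specialization(mi)
    end
    for mi in mi_list                # pass 2: patch up copies that did not "go live"
        milive = uniquing[mi]
        milive === mi && continue    # ours was inserted; nothing to do
        # otherwise: port the backedges over, and splice in the CodeInstance chain
        # when the live copy has no world-open cache of its own
    end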
 // verify that these edges intersect with the same methods as before
 static void jl_verify_edges(jl_array_t *targets, jl_array_t **pvalids)
@@ -2092,7 +2360,7 @@ static void jl_verify_edges(jl_array_t *targets, jl_array_t **pvalids)
 
 // Restore backedges to external targets
 // `targets` is [callee1, matches1, ...], the global set of non-worklist callees of worklist-owned methods.
-// `edges` = [caller1, targets_indexes1, ...], the list of worklist-owned methods calling external methods.
+// `list` = [caller1, targets_indexes1, ...], the list of worklist-owned methods calling external methods.
 static void jl_insert_backedges(jl_array_t *list, jl_array_t *targets)
 {
     // map(enable, ((list[i] => targets[list[i + 1] .* 2]) for i in 1:2:length(list) if all(valids[list[i + 1]])))
@@ -2104,7 +2372,6 @@ static void jl_insert_backedges(jl_array_t *list, jl_array_t *targets)
     for (i = 0; i < l; i += 2) {
         jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
         assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method));
-        assert(caller->def.method->primary_world == jl_atomic_load_acquire(&jl_world_counter)); // caller should be new
         jl_array_t *idxs_array = (jl_array_t*)jl_array_ptr_ref(list, i + 1);
         assert(jl_isa((jl_value_t*)idxs_array, jl_array_int32_type));
         int32_t *idxs = (int32_t*)jl_array_data(idxs_array);
@@ -2124,14 +2391,18 @@ static void jl_insert_backedges(jl_array_t *list, jl_array_t *targets)
             }
             else {
                 jl_methtable_t *mt = jl_method_table_for(callee);
-                assert((jl_value_t*)mt != jl_nothing);
-                jl_method_table_add_backedge(mt, callee, (jl_value_t*)caller);
+                // FIXME: rarely, `callee` has an unexpected `Union` signature,
+                // see https://github.com/JuliaLang/julia/pull/43990#issuecomment-1030329344
+                // Fix the issue and turn this back into an `assert((jl_value_t*)mt != jl_nothing)`
+                // This workaround exposes us to (rare) 265-violations.
+                if ((jl_value_t*)mt != jl_nothing)
+                    jl_method_table_add_backedge(mt, callee, (jl_value_t*)caller);
             }
         }
         // then enable it
         jl_code_instance_t *codeinst = caller->cache;
         while (codeinst) {
-            if (codeinst->min_world > 0)
+            if (ptrhash_get(&new_code_instance_validate, codeinst) != HT_NOTFOUND && codeinst->min_world > 0)
                 codeinst->max_world = ~(size_t)0;
             ptrhash_remove(&new_code_instance_validate, codeinst);  // mark it as handled
             codeinst = jl_atomic_load_relaxed(&codeinst->next);
@@ -2321,6 +2592,14 @@ JL_DLLEXPORT void jl_init_restored_modules(jl_array_t *init_order)
 
 // --- entry points ---
 
+// Register all newly-inferred MethodInstances
+// This gets called as the final step of Base.include_package_for_output
+JL_DLLEXPORT void jl_set_newly_inferred(jl_value_t* _newly_inferred)
+{
+    assert(_newly_inferred == NULL || jl_is_array(_newly_inferred));
+    newly_inferred = (jl_array_t*) _newly_inferred;
+}
+
 // Serialize the modules in `worklist` to file `fname`
 JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
 {
@@ -2352,6 +2631,7 @@ JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
     arraylist_new(&reinit_list, 0);
     htable_new(&edges_map, 0);
     htable_new(&backref_table, 5000);
+    htable_new(&external_mis, 0);
     ptrhash_put(&backref_table, jl_main_module, (char*)HT_NOTFOUND + 1);
     backref_table_numel = 1;
     jl_idtable_type = jl_base_module ? jl_get_global(jl_base_module, jl_symbol("IdDict")) : NULL;
@@ -2367,6 +2647,8 @@ JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
     jl_array_t *ext_targets = jl_alloc_vec_any(0);  // [callee1, matches1, ...] non-worklist callees of worklist-owned methods
     jl_array_t *edges = jl_alloc_vec_any(0);        // [caller1, ext_targets_indexes1, ...] for worklist-owned methods calling external methods
+    int n_ext_mis = queue_external_mis(newly_inferred);
+
     size_t i;
     size_t len = jl_array_len(mod_array);
     for (i = 0; i < len; i++) {
@@ -2390,8 +2672,11 @@ JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
         mod_array
     };
     jl_serialize_value(&s, worklist);   // serialize module-owned items (those accessible from the bindings table)
-    jl_serialize_value(&s, extext_methods);  // serialize new methods for external functions
-    // The next two allow us to restore backedges from external MethodInstances to internal ones
+    jl_serialize_value(&s, extext_methods);  // serialize new worklist-owned methods for external functions
+    serialize_htable_keys(&s, &external_mis, n_ext_mis);  // serialize external MethodInstances
+
+    // The next two allow us to restore backedges from external "unserialized" (stub-serialized) MethodInstances
+    // to the ones we serialize here
     jl_serialize_value(&s, edges);
     jl_serialize_value(&s, ext_targets);
     jl_finalize_serializer(&s);
@@ -2400,6 +2685,7 @@ JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
     jl_gc_enable(en);
     htable_reset(&edges_map, 0);
     htable_reset(&backref_table, 0);
+    htable_reset(&external_mis, 0);
     arraylist_free(&reinit_list);
 
     // Write the source-text for the dependent files
@@ -2630,12 +2916,12 @@ static jl_method_t *jl_lookup_method(jl_methtable_t *mt, jl_datatype_t *sig, siz
 static jl_method_t *jl_recache_method(jl_method_t *m)
 {
     assert(!m->is_for_opaque_closure);
+    assert(jl_is_method(m));
     jl_datatype_t *sig = (jl_datatype_t*)m->sig;
     jl_methtable_t *mt = jl_method_get_table(m);
     assert((jl_value_t*)mt != jl_nothing);
     jl_set_typeof(m, (void*)(intptr_t)0x30);  // invalidate the old value to help catch errors
-    jl_method_t *_new = jl_lookup_method(mt, sig, m->module->primary_world);
-    return _new;
+    return jl_lookup_method(mt, sig, m->module->primary_world);
 }
 
 static jl_value_t *jl_recache_other_(jl_value_t *o);
@@ -2694,6 +2980,34 @@ static void jl_recache_other(void)
     flagref_list.len = 0;
 }
 
+// Wait to copy roots until recaching is done
+// This is because recaching requires that all pointers to methods and methodinstances
+// stay at their source location as recorded by flagref_list. Once recaching is complete,
+// they can be safely copied over.
+static void jl_copy_roots(void)
+{
+    size_t i, j, l;
+    for (i = 0; i < queued_method_roots.size; i += 2) {
+        jl_method_t *m = (jl_method_t*)queued_method_roots.table[i];
+        m = (jl_method_t*)ptrhash_get(&uniquing_table, m);
+        jl_svec_t *keyroots = (jl_svec_t*)queued_method_roots.table[i+1];
+        if (keyroots != HT_NOTFOUND) {
+            uint64_t key = (uint64_t)(uintptr_t)jl_svec_ref(keyroots, 0);
+            jl_array_t *roots = (jl_array_t*)jl_svec_ref(keyroots, 1);
+            assert(jl_is_array(roots));
+            l = jl_array_len(roots);
+            for (j = 0; j < l; j++) {
+                jl_value_t *r = jl_array_ptr_ref(roots, j);
+                jl_value_t *newr = (jl_value_t*)ptrhash_get(&uniquing_table, r);
+                if (newr != HT_NOTFOUND) {
+                    jl_array_ptr_set(roots, j, newr);
+                }
+            }
+            jl_append_method_roots(m, key, roots);
+        }
+    }
+}
+
 static int trace_method(jl_typemap_entry_t *entry, void *closure)
 {
     jl_call_tracer(jl_newmeth_tracer, (jl_value_t*)entry->func.method);
@@ -2741,6 +3055,7 @@ static jl_value_t *_jl_restore_incremental(ios_t *f, jl_array_t *mod_array)
     arraylist_new(&backref_list, 4000);
     arraylist_push(&backref_list, jl_main_module);
     arraylist_new(&flagref_list, 0);
+    htable_new(&queued_method_roots, 0);
     htable_new(&new_code_instance_validate, 0);
     arraylist_new(&ccallable_list, 0);
     htable_new(&uniquing_table, 0);
@@ -2756,6 +3071,11 @@ static jl_value_t *_jl_restore_incremental(ios_t *f, jl_array_t *mod_array)
 
     // See explanation in jl_save_incremental for variables of the same names
     jl_value_t *extext_methods = jl_deserialize_value(&s, &extext_methods);
+    int i, n_ext_mis = read_int32(s.s);
+    jl_array_t *mi_list = jl_alloc_vec_any(n_ext_mis);  // reload MIs stored by serialize_htable_keys
+    jl_value_t **midata = (jl_value_t**)jl_array_data(mi_list);
+    for (i = 0; i < n_ext_mis; i++)
+        midata[i] = jl_deserialize_value(&s, &(midata[i]));
     jl_value_t *edges = jl_deserialize_value(&s, &edges);
     jl_value_t *ext_targets = jl_deserialize_value(&s, &ext_targets);
 
@@ -2766,9 +3086,11 @@ static jl_value_t *_jl_restore_incremental(ios_t *f, jl_array_t *mod_array)
     // at this point, the AST is fully reconstructed, but still completely disconnected
     // now all of the interconnects will be created
     jl_recache_types();  // make all of the types identities correct
-    htable_reset(&uniquing_table, 0);
     jl_insert_methods((jl_array_t*)extext_methods);  // hook up extension methods for external generic functions (needs to be after recache types)
     jl_recache_other();  // make all of the other objects identities correct (needs to be after insert methods)
+    jl_copy_roots();     // copying new roots of external methods (must wait until recaching is complete)
+    // At this point, the novel specializations in mi_list reference the real method, but they haven't been cached in its specializations
+    jl_insert_method_instances(mi_list);  // insert novel specializations
     htable_free(&uniquing_table);
     jl_array_t *init_order = jl_finalize_deserializer(&s, tracee_list);  // done with f and s (needs to be after recache)
     if (init_order == NULL)
@@ -2787,6 +3109,7 @@ static jl_value_t *_jl_restore_incremental(ios_t *f, jl_array_t *mod_array)
     htable_free(&new_code_instance_validate);
     arraylist_free(&flagref_list);
     arraylist_free(&backref_list);
+    htable_free(&queued_method_roots);
     ios_close(f);
     jl_gc_enable_finalizers(ct, 1);  // make sure we don't run any Julia code concurrently before this point
diff --git a/src/gf.c b/src/gf.c
index acd52c1913427..c59a0587166c1 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -99,7 +99,7 @@ static int speccache_eq(size_t idx, const void *ty, jl_svec_t *data, uint_t hv)
 }
 
 // get or create the MethodInstance for a specialization
-JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams)
+static jl_method_instance_t *jl_specializations_get_linfo_(jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams, jl_method_instance_t *mi_insert)
 {
     if (m->sig == (jl_value_t*)jl_anytuple_type && m->unspecialized)
         return m->unspecialized;  // handle builtin methods
@@ -150,7 +150,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m J
             assert(!jl_types_equal(mi->specTypes, type));
         }
     }
-    jl_method_instance_t *mi = jl_get_specialized(m, type, sparams);
+    jl_method_instance_t *mi = mi_insert ? mi_insert : jl_get_specialized(m, type, sparams);
     JL_GC_PUSH1(&mi);
     if (hv ? (i + 1 >= cl || jl_svecref(specializations, i + 1) != jl_nothing) : (i <= 1 || jl_svecref(specializations, i - 2) != jl_nothing)) {
         size_t ncl = cl < 8 ? 8 : (cl*3)>>1;
@@ -184,6 +184,19 @@ JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m J
     }
 }
 
+JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams)
+{
+    return jl_specializations_get_linfo_(m, type, sparams, NULL);
+}
+
+jl_method_instance_t *jl_specializations_get_or_insert(jl_method_instance_t *mi)
+{
+    jl_method_t *m = mi->def.method;
+    jl_value_t *type = mi->specTypes;
+    jl_svec_t *sparams = mi->sparam_vals;
+    return jl_specializations_get_linfo_(m, type, sparams, mi);
+}
+
 JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_value_t *type)
 {
     jl_value_t *mi = (jl_value_t*)jl_specializations_get_linfo(m, type, NULL);
@@ -1390,8 +1403,10 @@ static void invalidate_external(jl_method_instance_t *mi, size_t max_world) {
     }
 }
 
+static void do_nothing_with_codeinst(jl_code_instance_t *ci) {}
+
 // recursively invalidate cached methods that had an edge to a replaced method
-static void invalidate_method_instance(jl_method_instance_t *replaced, size_t max_world, int depth)
+static void invalidate_method_instance(void (*f)(jl_code_instance_t*), jl_method_instance_t *replaced, size_t max_world, int depth)
 {
     if (_jl_debug_method_invalidation) {
         jl_value_t *boxeddepth = NULL;
@@ -1411,6 +1426,7 @@ static void invalidate_method_instance(jl_method_instance_t *replaced, size_t ma
             codeinst->max_world = max_world;
         }
         assert(codeinst->max_world <= max_world);
+        (*f)(codeinst);
         codeinst = jl_atomic_load_relaxed(&codeinst->next);
     }
     // recurse to all backedges to update their valid range also
@@ -1420,14 +1436,14 @@ static void invalidate_method_instance(jl_method_instance_t *replaced, size_t ma
         size_t i, l = jl_array_len(backedges);
         for (i = 0; i < l; i++) {
             jl_method_instance_t *replaced = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i);
-            invalidate_method_instance(replaced, max_world, depth + 1);
+            invalidate_method_instance(f, replaced, max_world, depth + 1);
         }
     }
     JL_UNLOCK(&replaced->def.method->writelock);
 }
 
 // invalidate cached methods that overlap this definition
-static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, const char *why)
+void invalidate_backedges(void (*f)(jl_code_instance_t*), jl_method_instance_t *replaced_mi, size_t max_world, const char *why)
 {
     JL_LOCK(&replaced_mi->def.method->writelock);
     jl_array_t *backedges = replaced_mi->backedges;
@@ -1437,7 +1453,7 @@ static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_w
         size_t i, l = jl_array_len(backedges);
        jl_method_instance_t **replaced = (jl_method_instance_t**)jl_array_ptr_data(backedges);
         for (i = 0; i < l; i++) {
-            invalidate_method_instance(replaced[i], max_world, 1);
+            invalidate_method_instance(f, replaced[i], max_world, 1);
         }
     }
     JL_UNLOCK(&replaced_mi->def.method->writelock);
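The invalidation walk is now parameterized by a per-CodeInstance callback; the two usage patterns in this diff, side by side (Julia-flavored pseudocode of the C call sites):

    # ordinary method insertion/disabling: clamp worlds, touch nothing else
    invalidate_backedges(do_nothing_with_codeinst, mi, max_world, "jl_method_table_insert")
    # cache reload: additionally drop each clamped CodeInstance from the
    # pending-validation table so it won't be re-enabled later
    invalidate_backedges(remove_code_instance_from_validation, mi, world, "jl_insert_method_instance")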
@@ -1600,7 +1616,7 @@ static void jl_method_table_invalidate(jl_methtable_t *mt, jl_typemap_entry_t *m
             if ((jl_value_t*)mi != jl_nothing) {
                 invalidated = 1;
                 invalidate_external(mi, methodentry->max_world);
-                invalidate_backedges(mi, methodentry->max_world, "jl_method_table_disable");
+                invalidate_backedges(&do_nothing_with_codeinst, mi, methodentry->max_world, "jl_method_table_disable");
             }
         }
         if (invalidated && _jl_debug_method_invalidation) {
@@ -1731,7 +1747,7 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
                     if (missing) {
                         jl_method_instance_t *backedge = (jl_method_instance_t*)backedges[i];
                         invalidate_external(backedge, max_world);
-                        invalidate_method_instance(backedge, max_world, 0);
+                        invalidate_method_instance(&do_nothing_with_codeinst, backedge, max_world, 0);
                         invalidated = 1;
                         if (_jl_debug_method_invalidation)
                             jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)backedgetyp);
@@ -1801,7 +1817,7 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
                     invalidate_external(mi, max_world);
                     if (mi->backedges) {
                         invalidated = 1;
-                        invalidate_backedges(mi, max_world, "jl_method_table_insert");
+                        invalidate_backedges(&do_nothing_with_codeinst, mi, max_world, "jl_method_table_insert");
                     }
                 }
             }
@@ -2247,6 +2263,7 @@ JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types)
     if (mi == NULL)
         return 0;
     JL_GC_PROMISE_ROOTED(mi);
+    mi->precompiled = 1;
     if (jl_generating_output()) {
         jl_compile_now(mi);
         // In addition to full compilation of the compilation-signature, if `types` is more specific (e.g. due to nospecialize),
@@ -2261,6 +2278,7 @@ JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types)
         types2 = jl_type_intersection_env((jl_value_t*)types, (jl_value_t*)mi->def.method->sig, &tpenv2);
         jl_method_instance_t *li2 = jl_specializations_get_linfo(mi->def.method, (jl_value_t*)types2, tpenv2);
         JL_GC_POP();
+        li2->precompiled = 1;
         if (jl_rettype_inferred(li2, world, world) == jl_nothing)
             (void)jl_type_infer(li2, world, 1);
         if (jl_typeinf_func && mi->def.method->primary_world <= tworld) {
diff --git a/src/jltypes.c b/src/jltypes.c
index cb9141dd50fd4..a24e9f7e488bd 100644
--- a/src/jltypes.c
+++ b/src/jltypes.c
@@ -2466,7 +2466,7 @@ void jl_init_types(void) JL_GC_DISABLED
     jl_method_instance_type =
         jl_new_datatype(jl_symbol("MethodInstance"), core,
                         jl_any_type, jl_emptysvec,
-                        jl_perm_symsvec(8,
+                        jl_perm_symsvec(9,
                             "def",
                             "specTypes",
                             "sparam_vals",
@@ -2474,8 +2474,9 @@ void jl_init_types(void) JL_GC_DISABLED
                             "backedges",
                             "callbacks",
                             "cache",
-                            "inInference"),
-                        jl_svec(8,
+                            "inInference",
+                            "precompiled"),
+                        jl_svec(9,
                             jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type),
                             jl_any_type,
                             jl_simplevector_type,
@@ -2483,6 +2484,7 @@ void jl_init_types(void) JL_GC_DISABLED
                             jl_any_type,
                             jl_any_type,
                             jl_any_type,
+                            jl_bool_type,
                             jl_bool_type),
                         jl_emptysvec, 0, 1, 3);
diff --git a/src/julia.h b/src/julia.h
index d726162b88213..afb0d00630dbd 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -361,6 +361,7 @@ struct _jl_method_instance_t {
     jl_array_t *callbacks;  // list of callback functions to inform external caches about invalidations
     _Atomic(struct _jl_code_instance_t*) cache;
     uint8_t inInference;  // flags to tell if inference is running on this object
+    uint8_t precompiled;  // true if this instance was generated by an explicit `precompile(...)` call
 };
 
 // OpaqueClosure
@@ -1723,6 +1724,7 @@ JL_DLLEXPORT ios_t *jl_create_system_image(void *);
 JL_DLLEXPORT void jl_save_system_image(const char *fname);
 JL_DLLEXPORT void jl_restore_system_image(const char *fname);
 JL_DLLEXPORT void jl_restore_system_image_data(const char *buf, size_t len);
+JL_DLLEXPORT void jl_set_newly_inferred(jl_value_t *newly_inferred);
 JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist);
 JL_DLLEXPORT jl_value_t *jl_restore_incremental(const char *fname, jl_array_t *depmods);
 JL_DLLEXPORT jl_value_t *jl_restore_incremental_from_buf(const char *buf, size_t sz, jl_array_t *depmods);
diff --git a/src/julia_internal.h b/src/julia_internal.h
index 0fe527e2041a8..e55854121393b 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -223,6 +223,7 @@ extern tracer_cb jl_newmeth_tracer;
 void jl_call_tracer(tracer_cb callback, jl_value_t *tracee);
 void print_func_loc(JL_STREAM *s, jl_method_t *m);
 extern jl_array_t *_jl_debug_method_invalidation JL_GLOBALLY_ROOTED;
+void invalidate_backedges(void (*f)(jl_code_instance_t*), jl_method_instance_t *replaced_mi, size_t max_world, const char *why);
 
 extern JL_DLLEXPORT size_t jl_page_size;
 extern jl_function_t *jl_typeinf_func;
@@ -536,8 +537,10 @@ void jl_resolve_globals_in_ir(jl_array_t *stmts, jl_module_t *m, jl_svec_t *spar
                               int binding_effects);
 
 JL_DLLEXPORT void jl_add_method_root(jl_method_t *m, jl_module_t *mod, jl_value_t* root);
+void jl_append_method_roots(jl_method_t *m, uint64_t modid, jl_array_t* roots);
 int get_root_reference(rle_reference *rr, jl_method_t *m, size_t i);
 jl_value_t *lookup_root(jl_method_t *m, uint64_t key, int index);
+int nroots_with_key(jl_method_t *m, uint64_t key);
 
 int jl_valid_type_param(jl_value_t *v);
 
@@ -866,6 +869,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_method_compiled(jl_method_instance_t *mi JL_
 JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_value_t *type, size_t world);
 JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(
     jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams);
+jl_method_instance_t *jl_specializations_get_or_insert(jl_method_instance_t *mi_ins);
 JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_method_instance_t *caller);
 JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_value_t *caller);
diff --git a/src/method.c b/src/method.c
index f71bb8803caf0..7325670bd76a4 100644
--- a/src/method.c
+++ b/src/method.c
@@ -439,6 +439,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void)
     li->callbacks = NULL;
     jl_atomic_store_relaxed(&li->cache, NULL);
     li->inInference = 0;
+    li->precompiled = 0;
     return li;
 }
 
@@ -1033,15 +1034,8 @@ static void add_root_block(jl_array_t *root_blocks, uint64_t modid, size_t len)
     blocks[nx2-1] = len;
 }
 
-JL_DLLEXPORT void jl_add_method_root(jl_method_t *m, jl_module_t *mod, jl_value_t* root)
+static void prepare_method_for_roots(jl_method_t *m, uint64_t modid)
 {
-    JL_GC_PUSH2(&m, &root);
-    uint64_t modid = 0;
-    if (mod) {
-        assert(jl_is_module(mod));
-        modid = mod->build_id;
-    }
-    assert(jl_is_method(m));
     if (!m->roots) {
         m->roots = jl_alloc_vec_any(0);
         jl_gc_wb(m, m->roots);
@@ -1050,12 +1044,35 @@ JL_DLLEXPORT void jl_add_method_root(jl_method_t *m, jl_module_t *mod, jl_value_
         m->root_blocks = jl_alloc_array_1d(jl_array_uint64_type, 0);
         jl_gc_wb(m, m->root_blocks);
     }
+}
+
+JL_DLLEXPORT void jl_add_method_root(jl_method_t *m, jl_module_t *mod, jl_value_t* root)
+{
+    JL_GC_PUSH2(&m, &root);
+    uint64_t modid = 0;
+    if (mod) {
+        assert(jl_is_module(mod));
+        modid = mod->build_id;
+    }
+    assert(jl_is_method(m));
+    prepare_method_for_roots(m, modid);
     if (current_root_id(m->root_blocks) != modid)
         add_root_block(m->root_blocks, modid, jl_array_len(m->roots));
     jl_array_ptr_1d_push(m->roots, root);
     JL_GC_POP();
 }
 
+void jl_append_method_roots(jl_method_t *m, uint64_t modid, jl_array_t* roots)
+{
+    JL_GC_PUSH2(&m, &roots);
+    assert(jl_is_method(m));
+    assert(jl_is_array(roots));
+    prepare_method_for_roots(m, modid);
+    add_root_block(m->root_blocks, modid, jl_array_len(m->roots));
+    jl_array_ptr_1d_append(m->roots, roots);
+    JL_GC_POP();
+}
+
 // given the absolute index i of a root, retrieve its relocatable reference
 // returns 1 if the root is relocatable
 int get_root_reference(rle_reference *rr, jl_method_t *m, size_t i)
@@ -1084,6 +1101,23 @@ jl_value_t *lookup_root(jl_method_t *m, uint64_t key, int index)
     return jl_array_ptr_ref(m->roots, i);
 }
 
+int nroots_with_key(jl_method_t *m, uint64_t key)
+{
+    size_t nroots = 0;
+    if (m->roots)
+        nroots = jl_array_len(m->roots);
+    if (!m->root_blocks)
+        return key == 0 ? nroots : 0;
+    uint64_t *rletable = (uint64_t*)jl_array_data(m->root_blocks);
+    size_t j, nblocks2 = jl_array_len(m->root_blocks);
+    int nwithkey = 0;
+    for (j = 0; j < nblocks2; j += 2) {
+        if (rletable[j] == key)
+            nwithkey += (j + 3 < nblocks2 ? rletable[j+3] : nroots) - rletable[j+1];
+    }
+    return nwithkey;
+}
+
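A hedged Julia model of the root-block bookkeeping that `nroots_with_key` reads: `root_blocks` is a flat `[key1, start1, key2, start2, ...]` table of build-id keys and 0-based starting indices into `roots`:

    function nroots_with_key_model(roots::Vector, root_blocks::Vector{UInt64}, key::UInt64)
        isempty(root_blocks) && return key == 0 ? length(roots) : 0
        n = 0
        for j in 1:2:length(root_blocks)
            if root_blocks[j] == key      # this block belongs to `key`...
                # ...and runs until the next block's start (or the end of `roots`)
                stop = j + 3 <= length(root_blocks) ? root_blocks[j+3] : length(roots)
                n += Int(stop) - Int(root_blocks[j+1])
            end
        end
        return n
    end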
 #ifdef __cplusplus
 }
 #endif
diff --git a/stdlib/Artifacts/src/Artifacts.jl b/stdlib/Artifacts/src/Artifacts.jl
index 645e77944208b..6d3bdb5fb674b 100644
--- a/stdlib/Artifacts/src/Artifacts.jl
+++ b/stdlib/Artifacts/src/Artifacts.jl
@@ -272,17 +272,17 @@ function unpack_platform(entry::Dict{String,Any}, name::String,
     end
 
     # Collect all String-valued mappings in `entry` and use them as tags
-    tags = Dict{Symbol, String}()
+    tags = Dict{String, String}()
     for (k, v) in entry
         if v isa String
-            tags[Symbol(k)] = v
+            tags[k] = v
         end
     end
     # Removing some known entries that shouldn't be passed through `tags`
-    delete!(tags, :os)
-    delete!(tags, :arch)
-    delete!(tags, Symbol("git-tree-sha1"))
-    return Platform(entry["arch"], entry["os"]; tags...)
+    delete!(tags, "os")
+    delete!(tags, "arch")
+    delete!(tags, "git-tree-sha1")
+    return Platform(entry["arch"], entry["os"], tags)
 end
 
 function pack_platform!(meta::Dict, p::AbstractPlatform)
@@ -718,4 +718,9 @@ split_artifact_slash(name::AbstractString) =
 artifact_slash_lookup(name::AbstractString, artifact_dict::Dict, artifacts_toml::AbstractString) =
     artifact_slash_lookup(String(name)::String, artifact_dict, String(artifacts_toml)::String)
 
+# Precompilation to reduce latency
+precompile(load_artifacts_toml, (String,))
+precompile(NamedTuple{(:pkg_uuid,)}, (Tuple{Base.UUID},))
+precompile(Core.kwfunc(load_artifacts_toml), (NamedTuple{(:pkg_uuid,), Tuple{Base.UUID}}, typeof(load_artifacts_toml), String))
+
 end # module Artifacts
diff --git a/test/precompile.jl b/test/precompile.jl
index 411267705622d..2072d6591a181 100644
--- a/test/precompile.jl
+++ b/test/precompile.jl
@@ -83,7 +83,6 @@ function group_roots(iter::RLEIterator)
     return rootsby
 end
 
-
 precompile_test_harness("basic precompile functionality") do dir2
     precompile_test_harness(false) do dir
         Foo_file = joinpath(dir, "$Foo_module.jl")
@@ -585,11 +584,11 @@ precompile_test_harness(false) do dir
     end
 end
 
-# method root provenance
-# setindex!(::Dict{K,V}, ::Any, ::K) adds both compression and codegen roots
+# method root provenance & external code caching
 precompile_test_harness("code caching") do dir
     Bid = rootid(Base)
     Cache_module = :Cacheb8321416e8a3e2f1
+    # Note: calling setindex!(::Dict{K,V}, ::Any, ::K) adds both compression and codegen roots
     write(joinpath(dir, "$Cache_module.jl"),
         """
        module $Cache_module
@@ -619,33 +618,59 @@ precompile_test_harness("code caching") do dir
     Base.compilecache(Base.PkgId(string(Cache_module)))
     @eval using $Cache_module
     M = getfield(@__MODULE__, Cache_module)
+    # Test that this cache file "owns" all the roots
     Mid = rootid(M)
     for name in (:f, :fpush, :callboth)
         func = getfield(M, name)
         m = only(collect(methods(func)))
         @test all(i -> root_provenance(m, i) == Mid, 1:length(m.roots))
     end
+    # Check that we can cache external CodeInstances:
+    # size(::Vector) has an inferred specialization for Vector{X}
+    msize = which(size, (Vector{<:Any},))
+    hasspec = false
+    for i = 1:length(msize.specializations)
+        if isassigned(msize.specializations, i)
+            mi = msize.specializations[i]
+            if isa(mi, Core.MethodInstance)
+                tt = Base.unwrap_unionall(mi.specTypes)
+                if tt.parameters[2] == Vector{Cacheb8321416e8a3e2f1.X}
+                    if isdefined(mi, :cache) && isa(mi.cache, Core.CodeInstance) && mi.cache.max_world == typemax(UInt) && mi.cache.inferred !== nothing
+                        hasspec = true
+                        break
+                    end
+                end
+            end
+        end
+    end
+    @test hasspec
+    # Test that compilation adds to method roots with appropriate provenance
     m = which(setindex!, (Dict{M.X,Any}, Any, M.X))
-    @test_broken M.X ∈ m.roots # requires caching external compilation results
+    @test M.X ∈ m.roots
+    # Check that roots added outside of incremental builds get attributed to a moduleid of 0
     Base.invokelatest() do
         Dict{M.X2,Any}()[M.X2()] = nothing
     end
     @test M.X2 ∈ m.roots
     groups = group_roots(m)
-    @test_broken M.X ∈ groups[Mid] # requires caching external compilation results
-    @test M.X2 ∈ groups[rootid(@__MODULE__)]
+    @test M.X ∈ groups[Mid]  # attributed to M
+    @test M.X2 ∈ groups[0]   # active module is not known
     @test !isempty(groups[Bid])
+    # Check that internal methods and their roots are accounted for appropriately
     minternal = which(M.getelsize, (Vector,))
     mi = minternal.specializations[1]
+    @test Base.unwrap_unionall(mi.specTypes).parameters[2] == Vector{Int32}
     ci = mi.cache
     @test ci.relocatability == 1
+    @test ci.inferred !== nothing
+    # ...and that we can add "untracked" roots & non-relocatable CodeInstances to them too
     Base.invokelatest() do
         M.getelsize(M.X2[])
     end
     mi = minternal.specializations[2]
     ci = mi.cache
     @test ci.relocatability == 0
 
-    # PkgA loads PkgB, and both add roots to the same method (both before and after loading B)
+    # PkgA loads PkgB, and both add roots to the same `push!` method (both before and after loading B)
     Cache_module2 = :Cachea1544c83560f0c99
     write(joinpath(dir, "$Cache_module2.jl"),
         """
        module $Cache_module2
@@ -677,11 +702,175 @@ precompile_test_harness("code caching") do dir
     end
     mT = which(push!, (Vector{T} where T, Any))
     groups = group_roots(mT)
-    # all below require caching external CodeInstances
-    @test_broken M2.Y ∈ groups[M2id]
-    @test_broken M2.Z ∈ groups[M2id]
-    @test_broken M.X ∈ groups[Mid]
-    @test_broken M.X ∉ groups[M2id]
+    @test M2.Y ∈ groups[M2id]
+    @test M2.Z ∈ groups[M2id]
+    @test M.X ∈ groups[Mid]
+    @test M.X ∉ groups[M2id]
+
+    # backedges of external MethodInstances
+    # Root gets used by RootA and RootB, and both consumers end up inferring the same MethodInstance from Root
+    # Do both callers get listed as backedges?
+    RootModule = :Root_0xab07d60518763a7e
+    write(joinpath(dir, "$RootModule.jl"),
+    """
+    module $RootModule
+    function f(x)
+        while x < 10
+            x += oftype(x, 1)
+        end
+        return x
+    end
+    g1() = f(Int16(9))
+    g2() = f(Int16(9))
+    # all deliberately uncompiled
+    end
+    """)
+    RootA = :RootA_0xab07d60518763a7e
+    write(joinpath(dir, "$RootA.jl"),
+    """
+    module $RootA
+    using $RootModule
+    fA() = $RootModule.f(Int8(4))
+    fA()
+    $RootModule.g1()
+    end
+    """)
+    RootB = :RootB_0xab07d60518763a7e
+    write(joinpath(dir, "$RootB.jl"),
+    """
+    module $RootB
+    using $RootModule
+    fB() = $RootModule.f(Int8(4))
+    fB()
+    $RootModule.g2()
+    end
+    """)
+    Base.compilecache(Base.PkgId(string(RootA)))
+    Base.compilecache(Base.PkgId(string(RootB)))
+    @eval using $RootA
+    @eval using $RootB
+    MA = getfield(@__MODULE__, RootA)
+    MB = getfield(@__MODULE__, RootB)
+    M = getfield(MA, RootModule)
+    m = which(M.f, (Any,))
+    for mi in m.specializations
+        mi === nothing && continue
+        if mi.specTypes.parameters[2] === Int8
+            # external callers
+            mods = Module[]
+            for be in mi.backedges
+                push!(mods, be.def.module)
+            end
+            @test MA ∈ mods
+            @test MB ∈ mods
+            @test length(mods) == 2
+        elseif mi.specTypes.parameters[2] === Int16
+            # internal callers
+            meths = Method[]
+            for be in mi.backedges
+                push!(meths, be.def)
+            end
+            @test which(M.g1, ()) ∈ meths
+            @test which(M.g2, ()) ∈ meths
+            @test length(meths) == 2
+        end
+    end
+
+    # Invalidations (this test is adapted from SnoopCompile)
+    function hasvalid(mi, world)
+        isdefined(mi, :cache) || return false
+        ci = mi.cache
+        while true
+            ci.max_world >= world && return true
+            isdefined(ci, :next) || return false
+            ci = ci.next
+        end
+    end
+
+    StaleA = :StaleA_0xab07d60518763a7e
+    StaleB = :StaleB_0xab07d60518763a7e
+    StaleC = :StaleC_0xab07d60518763a7e
+    write(joinpath(dir, "$StaleA.jl"),
+        """
+        module $StaleA
+
+        stale(x) = rand(1:8)
+        stale(x::Int) = length(digits(x))
+
+        not_stale(x::String) = first(x)
+
+        use_stale(c) = stale(c[1]) + not_stale("hello")
+        build_stale(x) = use_stale(Any[x])
+
+        # force precompilation
+        build_stale(37)
+        stale('c')
+
+        end
+        """
+    )
+    write(joinpath(dir, "$StaleB.jl"),
+        """
+        module $StaleB
+
+        # StaleB does not know about StaleC when it is being built.
+        # However, if StaleC is loaded first, we get `"jl_insert_method_instance"`
+        # invalidations.
+        using $StaleA
+
+        # This will be invalidated if StaleC is loaded
+        useA() = $StaleA.stale("hello")
+
+        # force precompilation
+        useA()
+
+        end
+        """
+    )
+    write(joinpath(dir, "$StaleC.jl"),
+        """
+        module $StaleC
+
+        using $StaleA
+
+        $StaleA.stale(x::String) = length(x)
+        call_buildstale(x) = $StaleA.build_stale(x)
+
+        call_buildstale("hey")
+
+        end # module
+        """
+    )
+    for pkg in (StaleA, StaleB, StaleC)
+        Base.compilecache(Base.PkgId(string(pkg)))
+    end
+    @eval using $StaleA
+    @eval using $StaleC
+    @eval using $StaleB
+    MA = getfield(@__MODULE__, StaleA)
+    MB = getfield(@__MODULE__, StaleB)
+    MC = getfield(@__MODULE__, StaleC)
+    world = Base.get_world_counter()
+    m = only(methods(MA.use_stale))
+    mi = m.specializations[1]
+    @test hasvalid(mi, world)   # was re-inferred by StaleC
+    m = only(methods(MA.build_stale))
+    mis = filter(!isnothing, collect(m.specializations))
+    @test length(mis) == 2
+    for mi in mis
+        if mi.specTypes.parameters[2] == Int
+            @test mi.cache.max_world < world
+        else
+            # The variant for String got "healed" by recompilation in StaleC
+            @test mi.specTypes.parameters[2] == String
+            @test mi.cache.max_world == typemax(UInt)
+        end
+    end
+    m = only(methods(MB.useA))
+    mi = m.specializations[1]
+    @test !hasvalid(mi, world)  # invalidated by the stale(x::String) method in StaleC
+    m = only(methods(MC.call_buildstale))
+    mi = m.specializations[1]
+    @test hasvalid(mi, world)   # was compiled with the new method
 end
 
 # test --compiled-modules=no command line option
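To watch these invalidations live outside the test harness, the same debug hook that produces the `"jl_insert_method_instance"` strings can be toggled directly (a hedged sketch; this is the raw `ccall` that SnoopCompileCore wraps):

    invs = ccall(:jl_debug_method_invalidation, Any, (Cint,), 1)  # start logging
    try
        @eval using StaleC    # loading the dependent package triggers the invalidations
    finally
        ccall(:jl_debug_method_invalidation, Any, (Cint,), 0)     # stop logging
    end
    @show invs                # invalidated items interleaved with cause strings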