diff --git a/src/dump.c b/src/dump.c
index c8fe2b731920a..26fdf6e238c55 100644
--- a/src/dump.c
+++ b/src/dump.c
@@ -396,10 +396,11 @@ static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited,
 // build, select those that are external and have at least one
 // relocatable CodeInstance and are inferred to be called from the worklist
 // or explicitly added by a precompile statement.
-static size_t queue_external_mis(jl_array_t *list)
+// Also prepares external_mis for method_instance_in_queue queries.
+static jl_array_t *queue_external_mis(jl_array_t *list)
 {
     if (list == NULL)
-        return 0;
+        return NULL;
     size_t i, n = 0;
     htable_t visited;
     assert(jl_is_array(list));
@@ -412,17 +413,16 @@ static size_t queue_external_mis(jl_array_t *list)
             jl_method_t *m = mi->def.method;
             if (!module_in_worklist(m->module)) {
                 jl_code_instance_t *ci = mi->cache;
-                int relocatable = 0;
                 while (ci) {
-                    if (ci->max_world == ~(size_t)0)
-                        relocatable |= ci->relocatability;
-                    ci = ci->next;
+                    if (ci->max_world == ~(size_t)0 && ci->relocatability && ci->inferred)
+                        break;
+                    ci = jl_atomic_load_relaxed(&ci->next);
                 }
-                if (relocatable && ptrhash_get(&external_mis, mi) == HT_NOTFOUND) {
+                if (ci && ptrhash_get(&external_mis, mi) == HT_NOTFOUND) {
                     int found = has_backedge_to_worklist(mi, &visited, 1);
                     assert(found == 0 || found == 1);
                     if (found == 1) {
-                        ptrhash_put(&external_mis, mi, mi);
+                        ptrhash_put(&external_mis, mi, ci);
                         n++;
                     }
                 }
@@ -430,7 +430,18 @@ static size_t queue_external_mis(jl_array_t *list)
         }
     }
     htable_free(&visited);
-    return n;
+    if (n == 0)
+        return NULL;
+    jl_array_t *mi_list = jl_alloc_vec_any(n);
+    n = 0;
+    for (size_t i = 0; i < external_mis.size; i += 2) {
+        void *ci = external_mis.table[i+1];
+        if (ci != HT_NOTFOUND) {
+            jl_array_ptr_set(mi_list, n++, (jl_value_t*)ci);
+        }
+    }
+    assert(n == jl_array_len(mi_list));
+    return mi_list;
 }
 
 static void jl_serialize_datatype(jl_serializer_state *s, jl_datatype_t *dt) JL_GC_DISABLED
@@ -698,19 +709,16 @@ static int jl_serialize_generic(jl_serializer_state *s, jl_value_t *v) JL_GC_DIS
     return 0;
 }
 
-static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_t *codeinst, int skip_partial_opaque, int internal) JL_GC_DISABLED
+static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_t *codeinst, int skip_partial_opaque, int oc_hack) JL_GC_DISABLED
 {
-    if (internal > 2) {
-        while (codeinst && !codeinst->relocatability)
-            codeinst = codeinst->next;
-    }
-    if (jl_serialize_generic(s, (jl_value_t*)codeinst)) {
+    if (oc_hack && jl_serialize_generic(s, (jl_value_t*)codeinst)) {
         return;
     }
     assert(codeinst != NULL); // handle by jl_serialize_generic, but this makes clang-sa happy
     int validate = 0;
-    if (codeinst->max_world == ~(size_t)0)
+    if (codeinst->max_world == ~(size_t)0 && codeinst->inferred)
+        // TODO: also check if this object is part of the codeinst cache and in edges_map
         validate = 1; // can check on deserialize if this cache entry is still valid
     int flags = validate << 0;
     if (codeinst->invoke == jl_fptr_const_return)
@@ -725,7 +733,7 @@ static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_
     if (write_ret_type && codeinst->rettype_const &&
             jl_typeis(codeinst->rettype_const, jl_partial_opaque_type)) {
         if (skip_partial_opaque) {
-            jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque, internal);
+            jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque, 1);
             return;
         }
         else {
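Reviewer note: with `internal` gone, `jl_serialize_code_instance` no longer pre-skips non-relocatable entries; it simply walks the `codeinst->next` chain recursively, and the new `oc_hack` flag only controls whether the generic serializer may intercept the head. A minimal, self-contained sketch of this walk-with-skip pattern (plain C; `node`/`emit_chain` are illustrative names, not Julia's actual serializer):

```c
#include <stdio.h>

struct node { int payload; int skip; struct node *next; };

// Serialize a singly-linked list depth-first: emit the head, then recurse
// on the tail, optionally dropping entries a predicate rejects (compare
// the skip_partial_opaque path above, which drops an entry and continues).
static void emit_chain(struct node *n)
{
    if (n == NULL) {
        printf("nil\n");          // terminator, like serializing jl_nothing
        return;
    }
    if (n->skip) {                // drop this entry, keep the rest
        emit_chain(n->next);
        return;
    }
    printf("node %d -> ", n->payload);
    emit_chain(n->next);
}

int main(void)
{
    struct node c = {3, 0, NULL}, b = {2, 1, &c}, a = {1, 0, &b};
    emit_chain(&a);               // prints: node 1 -> node 3 -> nil
    return 0;
}
```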
@@ -752,7 +760,7 @@ static void jl_serialize_code_instance(jl_serializer_state *s, jl_code_instance_
         jl_serialize_value(s, jl_nothing);
     }
     write_uint8(s->s, codeinst->relocatability);
-    jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque, internal);
+    jl_serialize_code_instance(s, codeinst->next, skip_partial_opaque, 1);
 }
 
 enum METHOD_SERIALIZATION_MODE {
@@ -974,8 +982,6 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
             internal = 1;
         else if (module_in_worklist(mi->def.method->module))
             internal = 2;
-        else if (ptrhash_get(&external_mis, (void*)mi) != HT_NOTFOUND)
-            internal = 3;
         write_uint8(s->s, internal);
         if (!internal) {
             // also flag this in the backref table as special
@@ -1013,10 +1019,10 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
         }
         jl_serialize_value(s, (jl_value_t*)backedges);
         jl_serialize_value(s, (jl_value_t*)NULL); //callbacks
-        jl_serialize_code_instance(s, mi->cache, 1, internal);
+        jl_serialize_code_instance(s, mi->cache, 1, 1);
     }
     else if (jl_is_code_instance(v)) {
-        jl_serialize_code_instance(s, (jl_code_instance_t*)v, 0, 2);
+        jl_serialize_code_instance(s, (jl_code_instance_t*)v, 0, 0);
     }
     else if (jl_typeis(v, jl_module_type)) {
         jl_serialize_module(s, (jl_module_t*)v);
@@ -1186,26 +1192,10 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li
     }
 }
 
-// Used to serialize the external method instances queued in queued_method_roots (from newly_inferred)
-static void serialize_htable_keys(jl_serializer_state *s, htable_t *ht, int nitems)
-{
-    write_int32(s->s, nitems);
-    void **table = ht->table;
-    size_t i, n = 0, sz = ht->size;
-    (void)n;
-    for (i = 0; i < sz; i += 2) {
-        if (table[i+1] != HT_NOTFOUND) {
-            jl_serialize_value(s, (jl_value_t*)table[i]);
-            n += 1;
-        }
-    }
-    assert(n == nitems);
-}
 
 // Create the forward-edge map (caller => callees)
 //   the intent of these functions is to invert the backedges tree
 //   for anything that points to a method not part of the worklist
-//   or method instances not in the queue
 //
 // from MethodTables
 static void jl_collect_missing_backedges(jl_methtable_t *mt)
@@ -1219,9 +1209,8 @@ static void jl_collect_missing_backedges(jl_methtable_t *mt)
             jl_array_t **edges = (jl_array_t**)ptrhash_bp(&edges_map, (void*)caller);
             if (*edges == HT_NOTFOUND)
                 *edges = jl_alloc_vec_any(0);
-            // To stay synchronized with the format from MethodInstances (specifically for `invoke`d calls),
-            // we have to push a pair of values. But in this case the callee is unknown, so we leave it NULL.
-            push_edge(*edges, missing_callee, NULL);
+            jl_array_ptr_1d_push(*edges, NULL);
+            jl_array_ptr_1d_push(*edges, missing_callee);
         }
     }
 }
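Reviewer note: every `edges_map` value is now a flat array of pairs, `[invokesig1, callee1, invokesig2, callee2, ...]`, replacing the variable-width `push_edge` encoding. A NULL even slot means ordinary dispatch; a non-NULL even slot is an `invoke` signature; for a missed lookup (as above) the odd slot holds the signature instead of a MethodInstance. A self-contained sketch of consuming that layout (plain C, with strings standing in for Julia values):

```c
#include <stdio.h>

// Pairs are stored flat: [invokesig0, callee0, invokesig1, callee1, ...].
// NULL in the even slot means "ordinary dispatch"; a non-NULL even slot is
// the `invoke` signature the edge was made through.
static void walk_pairs(const char **pairs, size_t npairs)
{
    for (size_t k = 0; k < npairs; k++) {
        const char *invokesig = pairs[2 * k];
        const char *callee = pairs[2 * k + 1];
        if (invokesig)
            printf("invoke edge: %s via %s\n", callee, invokesig);
        else
            printf("call edge: %s\n", callee);
    }
}

int main(void)
{
    const char *pairs[] = {
        NULL, "f(::Int)",                        // ordinary dispatch edge
        "Tuple{typeof(f),Real}", "f(::Real)",    // `invoke` edge
    };
    walk_pairs(pairs, 2);
    return 0;
}
```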
@@ -1239,7 +1228,8 @@ static void collect_backedges(jl_method_instance_t *callee) JL_GC_DISABLED
             jl_array_t **edges = (jl_array_t**)ptrhash_bp(&edges_map, caller);
             if (*edges == HT_NOTFOUND)
                 *edges = jl_alloc_vec_any(0);
-            push_edge(*edges, invokeTypes, callee);
+            jl_array_ptr_1d_push(*edges, (jl_value_t*)invokeTypes);
+            jl_array_ptr_1d_push(*edges, (jl_value_t*)callee);
         }
     }
 }
@@ -1323,52 +1313,36 @@ static void jl_collect_extext_methods_from_mod(jl_array_t *s, jl_module_t *m) JL
     }
 }
 
-static void register_backedge(htable_t *all_callees, jl_value_t *invokeTypes, jl_value_t *c)
+static void jl_record_edges(jl_method_instance_t *caller, arraylist_t *wq, jl_array_t *edges) JL_GC_DISABLED
 {
-    if (invokeTypes)
-        ptrhash_put(all_callees, invokeTypes, c);
-    else
-        ptrhash_put(all_callees, c, c);
-
-}
-
-// flatten the backedge map reachable from caller into callees
-static void jl_collect_backedges_to(jl_method_instance_t *caller, htable_t *all_callees) JL_GC_DISABLED
-{
-    if (module_in_worklist(caller->def.method->module) || method_instance_in_queue(caller))
-        return;
-    if (ptrhash_has(&edges_map, caller)) {
-        jl_array_t **pcallees = (jl_array_t**)ptrhash_bp(&edges_map, (void*)caller),
-                    *callees = *pcallees;
-        assert(callees != HT_NOTFOUND);
-        *pcallees = (jl_array_t*) HT_NOTFOUND;
-        size_t i = 0, l = jl_array_len(callees);
-        jl_method_instance_t *c;
-        jl_value_t *invokeTypes;
-        while (i < l) {
-            i = get_next_edge(callees, i, &invokeTypes, &c);
-            register_backedge(all_callees, invokeTypes, (jl_value_t*)c);
+    jl_array_t *callees = (jl_array_t*)ptrhash_get(&edges_map, (void*)caller);
+    if (callees != HT_NOTFOUND) {
+        ptrhash_remove(&edges_map, (void*)caller);
+        jl_array_ptr_1d_push(edges, (jl_value_t*)caller);
+        jl_array_ptr_1d_push(edges, (jl_value_t*)callees);
+        size_t i, l = jl_array_len(callees);
+        for (i = 1; i < l; i += 2) {
+            jl_method_instance_t *c = (jl_method_instance_t*)jl_array_ptr_ref(callees, i);
             if (c && jl_is_method_instance(c)) {
-                jl_collect_backedges_to((jl_method_instance_t*)c, all_callees);
+                assert(!module_in_worklist(c->def.method->module)); // these are preserved already as backedges if usable and applicable
+                arraylist_push(wq, c);
             }
         }
     }
 }
 
+
 // Extract `edges` and `ext_targets` from `edges_map`
-// This identifies internal->external edges in the call graph, pulling them out for special treatment.
-static void jl_collect_backedges(jl_array_t *edges, jl_array_t *ext_targets)
-{
-    htable_t all_targets;         // target => tgtindex mapping
-    htable_t all_callees;         // MIs called by worklist methods (eff. Set{MethodInstance})
-    htable_new(&all_targets, 0);
-    htable_new(&all_callees, 0);
-    jl_value_t *invokeTypes;
-    jl_method_instance_t *c;
-    size_t i;
+// `edges` = [caller1, targets_indexes1, ...], the list of methods and their edges
+// `ext_targets` is [invokesig1, callee1, matches1, ...], the edges for each target
+static void jl_collect_edges(jl_array_t *edges, jl_array_t *ext_targets)
+{
+    size_t world = jl_atomic_load_acquire(&jl_world_counter);
+    arraylist_t wq;
+    arraylist_new(&wq, 0);
     void **table = edges_map.table;    // edges is caller => callees
     size_t table_size = edges_map.size;
-    for (i = 0; i < table_size; i += 2) {
+    for (size_t i = 0; i < table_size; i += 2) {
         assert(table == edges_map.table && table_size == edges_map.size &&
                "edges_map changed during iteration");
         jl_method_instance_t *caller = (jl_method_instance_t*)table[i];
@@ -1376,67 +1350,115 @@ static void jl_collect_backedges(jl_array_t *edges, jl_array_t *ext_targets)
         if (callees == HT_NOTFOUND)
             continue;
         assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method));
-        if (module_in_worklist(caller->def.method->module) || method_instance_in_queue(caller)) {
-            size_t i = 0, l = jl_array_len(callees);
-            while (i < l) {
-                i = get_next_edge(callees, i, &invokeTypes, &c);
-                register_backedge(&all_callees, invokeTypes, (jl_value_t*)c);
-                if (c && jl_is_method_instance(c)) {
-                    jl_collect_backedges_to((jl_method_instance_t*)c, &all_callees);
-                }
-            }
-            callees = jl_alloc_array_1d(jl_array_int32_type, 0);
-            void **pc = all_callees.table;
-            size_t j;
-            int valid = 1;
-            int mode;
-            for (j = 0; valid && j < all_callees.size; j += 2) {
-                if (pc[j + 1] != HT_NOTFOUND) {
-                    jl_value_t *callee = (jl_value_t*)pc[j];
-                    void *target = ptrhash_get(&all_targets, (void*)callee);
-                    if (target == HT_NOTFOUND) {
-                        jl_value_t *sig;
-                        if (jl_is_method_instance(callee)) {
-                            sig = ((jl_method_instance_t*)callee)->specTypes;
-                            mode = 1;
-                        }
-                        else {
-                            sig = callee;
-                            callee = (jl_value_t*)pc[j+1];
-                            mode = 2;
-                        }
-                        size_t min_valid = 0;
-                        size_t max_valid = ~(size_t)0;
-                        int ambig = 0;
-                        jl_value_t *matches = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing, -1, 0, jl_atomic_load_acquire(&jl_world_counter), &min_valid, &max_valid, &ambig);
-                        if (matches == jl_false) {
-                            valid = 0;
-                            break;
-                        }
-                        size_t k;
-                        for (k = 0; k < jl_array_len(matches); k++) {
-                            jl_method_match_t *match = (jl_method_match_t *)jl_array_ptr_ref(matches, k);
-                            jl_array_ptr_set(matches, k, match->method);
+        if (module_in_worklist(caller->def.method->module) ||
+                method_instance_in_queue(caller)) {
+            jl_record_edges(caller, &wq, edges);
+        }
+    }
+    for (size_t i = 0; i < wq.len; i++) {
+        jl_method_instance_t *caller = (jl_method_instance_t*)wq.items[i];
+        jl_record_edges(caller, &wq, edges);
+    }
+    arraylist_free(&wq);
+    htable_reset(&edges_map, 0);
+    size_t l = jl_array_len(edges);
+    // process target list to turn it into a memoized validity table
+    // and compute the old methods list, ready for serialization
+    for (size_t i = 0; i < l; i += 2) {
+        jl_array_t *callees = (jl_array_t*)jl_array_ptr_ref(edges, i + 1);
+        size_t l = jl_array_len(callees);
+        jl_array_t *callee_ids = jl_alloc_array_1d(jl_array_int32_type, l + 1);
+        int32_t *idxs = (int32_t*)jl_array_data(callee_ids);
+        idxs[0] = l / 2;
+        int valid = 1;
+        for (size_t j = 0; j < l; j += 2) {
+            jl_value_t *invokeTypes = jl_array_ptr_ref(callees, j);
+            jl_value_t *callee = jl_array_ptr_ref(callees, j + 1);
+            // (nullptr, c) => call
+            // (invokeTypes, c) => invoke
+            // (nullptr, invokeTypes) => missing call
+            // (invokeTypes, nullptr) => missing invoke (unused--inferred as Any)
+            void *target = ptrhash_get(&edges_map, invokeTypes ? (void*)invokeTypes : (void*)callee);
+            if (target == HT_NOTFOUND) {
+                jl_value_t *matches;
+                size_t min_valid = 0;
+                size_t max_valid = ~(size_t)0;
+                if (invokeTypes) {
+                    assert(callee && "unsupported edge");
+                    jl_methtable_t *mt = jl_method_get_table(((jl_method_instance_t*)callee)->def.method);
+                    if ((jl_value_t*)mt == jl_nothing) {
+                        valid = 0;
+                        break;
+                    }
+                    else {
+                        matches = jl_gf_invoke_lookup_worlds(invokeTypes, (jl_value_t*)mt, world, &min_valid, &max_valid);
+                        if (matches == jl_nothing) {
+                            valid = 0;
+                            break;
                         }
-                        jl_array_ptr_1d_push(ext_targets, mode == 1 ? NULL : sig);
-                        jl_array_ptr_1d_push(ext_targets, callee);
-                        jl_array_ptr_1d_push(ext_targets, matches);
-                        target = (char*)HT_NOTFOUND + jl_array_len(ext_targets) / 3;
-                        ptrhash_put(&all_targets, (void*)callee, target);
+                        matches = (jl_value_t*)((jl_method_match_t*)matches)->method;
+                    }
+                }
+                else {
+                    jl_value_t *sig;
+                    if (jl_is_method_instance(callee))
+                        sig = ((jl_method_instance_t*)callee)->specTypes;
+                    else
+                        sig = callee;
+                    int ambig = 0;
+                    matches = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing,
+                            -1, 0, world, &min_valid, &max_valid, &ambig);
+                    if (matches == jl_false) {
+                        valid = 0;
+                        break;
+                    }
                     size_t k;
                     for (k = 0; k < jl_array_len(matches); k++) {
                         jl_method_match_t *match = (jl_method_match_t *)jl_array_ptr_ref(matches, k);
                         jl_array_ptr_set(matches, k, match->method);
                     }
-                    jl_array_grow_end(callees, 1);
-                    ((int32_t*)jl_array_data(callees))[jl_array_len(callees) - 1] = (char*)target - (char*)HT_NOTFOUND - 1;
                 }
+                jl_array_ptr_1d_push(ext_targets, invokeTypes);
+                jl_array_ptr_1d_push(ext_targets, callee);
+                jl_array_ptr_1d_push(ext_targets, matches);
+                target = (void*)((char*)HT_NOTFOUND + jl_array_len(ext_targets) / 3);
+                ptrhash_put(&edges_map, (void*)callee, target);
             }
-            htable_reset(&all_callees, 100);
-            if (valid) {
-                jl_array_ptr_1d_push(edges, (jl_value_t*)caller);
-                jl_array_ptr_1d_push(edges, (jl_value_t*)callees);
+            idxs[j / 2 + 1] = (char*)target - (char*)HT_NOTFOUND - 1;
+        }
+        if (valid)
+            jl_array_ptr_set(edges, i + 1, callee_ids); // swap callees for ids
+        else
+            jl_array_ptr_set(edges, i + 1, NULL);
+    }
+    // record place of every method in edges
+    htable_reset(&edges_map, 0);
+    for (size_t i = 0; i < l; i += 2) {
+        jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, i);
+        void *target = (void*)((char*)HT_NOTFOUND + i / 2 + 1);
+        ptrhash_put(&edges_map, (void*)caller, target);
+    }
+    // add method edges to the callee_id list
+    for (size_t i = 0; i < l; i += 2) {
+        jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, i + 1);
+        if (!callee_ids)
+            continue; // invalid edges
+        int32_t *idxs = (int32_t*)jl_array_data(callee_ids);
+        size_t l = idxs[0];
+        size_t nt = l + 1;
+        for (size_t j = 0; j < l; j++) {
+            int idx = idxs[j + 1];
+            jl_value_t *callee = jl_array_ptr_ref(ext_targets, idx * 3 + 1);
+            if (callee && jl_is_method_instance(callee)) {
+                void *target = ptrhash_get(&edges_map, (void*)callee);
+                if (target != HT_NOTFOUND) {
+                    idxs[nt++] = (char*)target - (char*)HT_NOTFOUND - 1;
                 }
             }
         }
+        jl_array_del_end(callee_ids, 2 * l + 1 - nt);
     }
-    htable_free(&all_targets);
-    htable_free(&all_callees);
+    htable_reset(&edges_map, 0);
 }
 
 // serialize information about all loaded modules
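Reviewer note: the `callee_ids` arrays built by `jl_collect_edges` use a header-plus-two-segments layout that the verification passes below depend on: `idxs[0]` is the number of external targets, `idxs[1..idxs[0]]` index into `ext_targets`, and entries appended after that prefix index back into `edges` itself for worklist-internal callees. The `(char*)HT_NOTFOUND + k` arithmetic stores the small integer `k` in a pointer-valued hash table without colliding with the not-found sentinel. A self-contained sketch of both conventions (plain C; all names are illustrative):

```c
#include <assert.h>
#include <stdio.h>

static char sentinel;                    // stands in for HT_NOTFOUND
#define NOTFOUND ((void*)&sentinel)

// Store the small integer idx+1 as an offset from the sentinel pointer, so
// a "found" value can never compare equal to NOTFOUND itself.
static void *encode_idx(int idx) { return (char*)NOTFOUND + idx + 1; }
static int decode_idx(void *tagged) { return (int)((char*)tagged - (char*)NOTFOUND) - 1; }

int main(void)
{
    assert(decode_idx(encode_idx(7)) == 7);
    assert(encode_idx(0) != NOTFOUND);   // even index 0 stays distinguishable

    // callee_ids layout: [ntargets, t0..t(n-1), internal edge ids...]
    int callee_ids[] = {2, /* ext targets: */ 5, 9, /* internal edges: */ 1, 3};
    int ntargets = callee_ids[0];
    for (int j = 0; j < ntargets; j++)
        printf("external target #%d\n", callee_ids[1 + j]);
    for (size_t j = 1 + (size_t)ntargets; j < sizeof(callee_ids) / sizeof(int); j++)
        printf("internal edge #%d\n", callee_ids[j]);
    return 0;
}
```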
@@ -2370,327 +2392,331 @@ void remove_code_instance_from_validation(jl_code_instance_t *codeinst)
     ptrhash_remove(&new_code_instance_validate, codeinst);
 }
 
-static int do_selective_invoke_backedge_invalidation(jl_methtable_t *mt, jl_value_t *mworld, jl_method_instance_t *mi, size_t world)
-{
-    jl_value_t *invokeTypes;
-    jl_method_instance_t *caller;
-    size_t jins = 0, j0, j = 0, nbe = jl_array_len(mi->backedges);
-    while (j < nbe) {
-        j0 = j;
-        j = get_next_edge(mi->backedges, j, &invokeTypes, &caller);
-        if (invokeTypes) {
-            struct jl_typemap_assoc search = {invokeTypes, world, NULL, 0, ~(size_t)0};
-            jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(mt->defs, &search, /*offs*/0, /*subtype*/0);
-            if (entry) {
-                jl_value_t *imworld = entry->func.value;
-                if (jl_is_method(imworld) && mi->def.method == (jl_method_t*)imworld) {
-                    // this one is OK
-                    // in case we deleted some earlier ones, move this earlier
-                    for (; j0 < j; jins++, j0++) {
-                        jl_array_ptr_set(mi->backedges, jins, jl_array_ptr_ref(mi->backedges, j0));
-                    }
-                    continue;
-                }
-            }
-        }
-        invalidate_backedges(&remove_code_instance_from_validation, caller, world, "jl_insert_method_instance caller");
-        // The codeinst of this mi haven't yet been removed
-        jl_code_instance_t *codeinst = caller->cache;
-        while (codeinst) {
-            remove_code_instance_from_validation(codeinst);
-            codeinst = codeinst->next;
-        }
-    }
-    jl_array_del_end(mi->backedges, j - jins);
-    if (jins == 0) {
-        return 0;
-    }
-    return 1;
-}
-
-static void jl_insert_method_instances(jl_array_t *list) JL_GC_DISABLED
+// verify that these edges intersect with the same methods as before
+static jl_array_t *jl_verify_edges(jl_array_t *targets)
 {
-    size_t i, l = jl_array_len(list);
-    // Validate the MethodInstances
+    size_t world = jl_atomic_load_acquire(&jl_world_counter);
+    size_t i, l = jl_array_len(targets) / 3;
     jl_array_t *valids = jl_alloc_array_1d(jl_array_uint8_type, l);
     memset(jl_array_data(valids), 1, l);
-    size_t world = jl_atomic_load_acquire(&jl_world_counter);
+    JL_GC_PUSH1(&valids);
     for (i = 0; i < l; i++) {
-        jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
+        jl_value_t *invokesig = jl_array_ptr_ref(targets, i * 3);
+        jl_value_t *callee = jl_array_ptr_ref(targets, i * 3 + 1);
+        jl_value_t *expected = jl_array_ptr_ref(targets, i * 3 + 2);
         int valid = 1;
-        assert(jl_is_method_instance(mi));
-        if (jl_is_method(mi->def.method)) {
-            jl_method_t *m = mi->def.method;
-            if (m->deleted_world != ~(size_t)0) {
-                // The method we depended on has been deleted, invalidate
+        size_t min_valid = 0;
+        size_t max_valid = ~(size_t)0;
+        if (invokesig) {
+            assert(callee && "unsupported edge");
+            jl_methtable_t *mt = jl_method_get_table(((jl_method_instance_t*)callee)->def.method);
+            if ((jl_value_t*)mt == jl_nothing) {
                 valid = 0;
-            } else {
-                // Is this still the method we'd be calling?
-                jl_methtable_t *mt = jl_method_table_for(mi->specTypes);
-                struct jl_typemap_assoc search = {(jl_value_t*)mi->specTypes, world, NULL, 0, ~(size_t)0};
-                jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(mt->defs, &search, /*offs*/0, /*subtype*/0);
-                if (entry) {
-                    jl_value_t *mworld = entry->func.value;
-                    if (jl_is_method(mworld) && mi->def.method != (jl_method_t*)mworld && jl_type_morespecific(((jl_method_t*)mworld)->sig, mi->def.method->sig)) {
-                        if (!mi->backedges) {
-                            valid = 0;
-                        } else {
-                            // There's still a chance this is valid, if any caller made this via `invoke` and the invoke-signature is still valid.
-                            // Selectively go through all the backedges, invalidating those not made via `invoke` and validating those that are.
-                            if (!do_selective_invoke_backedge_invalidation(mt, mworld, mi, world)) {
-                                m = (jl_method_t*)mworld;
-                                valid = 0;
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        if (!valid) {
-            // None of the callers were valid, so invalidate `mi` too
-            jl_array_uint8_set(valids, i, 0);
-            invalidate_backedges(&remove_code_instance_from_validation, mi, world, "jl_insert_method_instance");
-            jl_code_instance_t *codeinst = mi->cache;
-            while (codeinst) {
-                remove_code_instance_from_validation(codeinst);
-                codeinst = codeinst->next;
-            }
-            if (_jl_debug_method_invalidation) {
-                jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)m);
-                jl_array_ptr_1d_push(_jl_debug_method_invalidation, jl_cstr_to_string("jl_method_table_insert")); // GC disabled
-            }
+                break;
+            }
+            jl_value_t *matches = jl_gf_invoke_lookup_worlds(invokesig, (jl_value_t*)mt, world, &min_valid, &max_valid);
+            if (matches == jl_nothing) {
+                valid = 0;
+                break;
+            }
+            matches = (jl_value_t*)((jl_method_match_t*)matches)->method;
+            if (matches != expected) {
+                valid = 0;
+                break;
+            }
         }
-    }
-    // While it's tempting to just remove the invalidated MIs altogether,
-    // this hurts the ability of SnoopCompile to diagnose problems.
-    for (i = 0; i < l; i++) {
-        jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
-        jl_method_instance_t *milive = jl_specializations_get_or_insert(mi);
-        ptrhash_put(&uniquing_table, mi, milive); // store the association for the 2nd pass
-    }
-    // We may need to fix up the backedges for the ones that didn't "go live"
-    for (i = 0; i < l; i++) {
-        jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(list, i);
-        jl_method_instance_t *milive = (jl_method_instance_t*)ptrhash_get(&uniquing_table, mi);
-        if (milive != mi) {
-            // A previously-loaded module compiled this method, so the one we deserialized will be dropped.
-            // But make sure the backedges are copied over.
-            jl_value_t *invokeTypes;
-            jl_method_instance_t *be, *belive;
-            if (mi->backedges) {
-                if (!milive->backedges) {
-                    // Copy all the backedges (after looking up the live ones)
-                    size_t j = 0, jlive = 0, n = jl_array_len(mi->backedges);
-                    milive->backedges = jl_alloc_vec_any(n);
-                    jl_gc_wb(milive, milive->backedges);
-                    while (j < n) {
-                        j = get_next_edge(mi->backedges, j, &invokeTypes, &be);
-                        belive = (jl_method_instance_t*)ptrhash_get(&uniquing_table, be);
-                        if (belive == HT_NOTFOUND)
-                            belive = be;
-                        jlive = set_next_edge(milive->backedges, jlive, invokeTypes, belive);
-                    }
-                } else {
-                    // Copy the missing backedges (this is an O(N^2) algorithm, but many methods have few MethodInstances)
-                    size_t j = 0, k, n = jl_array_len(mi->backedges), nlive = jl_array_len(milive->backedges);
-                    jl_value_t *invokeTypes2;
-                    jl_method_instance_t *belive2;
-                    while (j < n) {
-                        j = get_next_edge(mi->backedges, j, &invokeTypes, &be);
-                        belive = (jl_method_instance_t*)ptrhash_get(&uniquing_table, be);
-                        if (belive == HT_NOTFOUND)
-                            belive = be;
-                        int found = 0;
-                        k = 0;
-                        while (k < nlive) {
-                            k = get_next_edge(milive->backedges, k, &invokeTypes2, &belive2);
-                            if (belive == belive2 && ((invokeTypes == NULL && invokeTypes2 == NULL) ||
-                                    (invokeTypes && invokeTypes2 && jl_egal(invokeTypes, invokeTypes2)))) {
-                                found = 1;
-                                break;
-                            }
-                        }
-                        if (!found)
-                            push_edge(milive->backedges, invokeTypes, belive);
-                    }
-                }
+        else {
+            jl_value_t *sig;
+            if (jl_is_method_instance(callee))
+                sig = ((jl_method_instance_t*)callee)->specTypes;
+            else
+                sig = callee;
+            assert(jl_is_array(expected));
+            int ambig = 0;
+            // TODO: possibly need to include ambiguities too (for the optimizer correctness)?
+            jl_value_t *matches = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing,
+                    -1, 0, world, &min_valid, &max_valid, &ambig);
+            if (matches == jl_false || jl_array_len(matches) != jl_array_len(expected)) {
+                valid = 0;
             }
-            // Additionally, if we have CodeInstance(s) and the running CodeInstance is world-limited, transfer it
-            if (mi->cache && jl_array_uint8_ref(valids, i)) {
-                if (!milive->cache || milive->cache->max_world < ~(size_t)0) {
-                    jl_code_instance_t *cilive = milive->cache, *ci;
-                    milive->cache = mi->cache;
-                    jl_gc_wb(milive, milive->cache);
-                    ci = mi->cache;
-                    ci->def = milive;
-                    while (ci->next) {
-                        ci = ci->next;
-                        ci->def = milive;
+            else {
+                size_t j, k, l = jl_array_len(expected);
+                for (k = 0; k < jl_array_len(matches); k++) {
+                    jl_method_match_t *match = (jl_method_match_t*)jl_array_ptr_ref(matches, k);
+                    jl_method_t *m = match->method;
+                    for (j = 0; j < l; j++) {
+                        if (m == (jl_method_t*)jl_array_ptr_ref(expected, j))
+                            break;
+                    }
+                    if (j == l) {
+                        // intersection has a new method or a method was
+                        // deleted--this is now probably no good, just invalidate
+                        // everything about it now
+                        valid = 0;
+                        break;
                     }
-                    ci->next = cilive;
-                    jl_gc_wb(ci, ci->next);
                 }
             }
         }
+        jl_array_uint8_set(valids, i, valid);
+        //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)invokesig);
+        //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)callee);
+        //ios_puts(valid ? "valid\n" : "INVALID\n", ios_stderr);
     }
+    JL_GC_POP();
+    return valids;
 }
 
-// verify that these edges intersect with the same methods as before
-static void jl_verify_edges(jl_array_t *targets, jl_array_t **pvalids)
+// Combine all edges relevant to a method into the visited table
+void jl_verify_methods(jl_array_t *edges, jl_array_t *valids, htable_t *visited) JL_NOTSAFEPOINT
 {
-    size_t i, l = jl_array_len(targets) / 3;
-    jl_array_t *valids = jl_alloc_array_1d(jl_array_uint8_type, l);
-    memset(jl_array_data(valids), 1, l);
-    jl_value_t *loctag = NULL, *matches = NULL;
-    JL_GC_PUSH2(&loctag, &matches);
-    *pvalids = valids;
+    size_t i, l = jl_array_len(edges) / 2;
+    htable_new(visited, l);
     for (i = 0; i < l; i++) {
-        jl_value_t *invokesig = jl_array_ptr_ref(targets, i * 3);
-        jl_value_t *callee = jl_array_ptr_ref(targets, i * 3 + 1);
-        jl_method_instance_t *callee_mi = (jl_method_instance_t*)callee;
-        jl_value_t *sig;
-        if (callee && jl_is_method_instance(callee)) {
-            sig = invokesig == NULL ? callee_mi->specTypes : invokesig;
-        }
-        else {
-            sig = callee == NULL ? invokesig : callee;
-        }
-        jl_array_t *expected = (jl_array_t*)jl_array_ptr_ref(targets, i * 3 + 2);
-        assert(jl_is_array(expected));
+        jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, 2 * i);
+        assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method));
+        jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, 2 * i + 1);
+        assert(jl_typeis((jl_value_t*)callee_ids, jl_array_int32_type));
         int valid = 1;
-        size_t min_valid = 0;
-        size_t max_valid = ~(size_t)0;
-        int ambig = 0;
-        // TODO: possibly need to included ambiguities too (for the optimizer correctness)?
-        matches = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing, -1, 0, jl_atomic_load_acquire(&jl_world_counter), &min_valid, &max_valid, &ambig);
-        if (matches == jl_false || jl_array_len(matches) != jl_array_len(expected)) {
+        if (callee_ids == NULL) {
+            // serializing the edges had failed
             valid = 0;
         }
         else {
-            size_t j, k, l = jl_array_len(expected);
-            for (k = 0; k < jl_array_len(matches); k++) {
-                jl_method_match_t *match = (jl_method_match_t*)jl_array_ptr_ref(matches, k);
-                jl_method_t *m = match->method;
-                for (j = 0; j < l; j++) {
-                    if (m == (jl_method_t*)jl_array_ptr_ref(expected, j))
-                        break;
-                }
-                if (j == l) {
-                    // intersection has a new method or a method was
-                    // deleted--this is now probably no good, just invalidate
-                    // everything about it now
-                    valid = 0;
-                    break;
-                }
+            int32_t *idxs = (int32_t*)jl_array_data(callee_ids);
+            size_t j;
+            for (j = 0; valid && j < idxs[0]; j++) {
+                int32_t idx = idxs[j + 1];
+                valid = jl_array_uint8_ref(valids, idx);
             }
         }
-        jl_array_uint8_set(valids, i, valid);
-        if (!valid && _jl_debug_method_invalidation) {
-            jl_array_ptr_1d_push(_jl_debug_method_invalidation, callee ? (jl_value_t*)callee : sig);
-            loctag = jl_cstr_to_string("insert_backedges_callee");
-            jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
-            loctag = jl_box_int32((int32_t)i);
-            jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
-            loctag = jl_box_uint64(jl_worklist_key(serializer_worklist));
-            jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
-            if (matches != jl_false) {
-                // setdiff!(matches, expected)
-                size_t j, k, ins = 0;
-                for (j = 0; j < jl_array_len(matches); j++) {
-                    int found = 0;
-                    jl_method_t *match = ((jl_method_match_t*)jl_array_ptr_ref(matches, j))->method;
-                    for (k = 0; !found && k < jl_array_len(expected); k++)
-                        found |= jl_egal((jl_value_t*)match, jl_array_ptr_ref(expected, k));
-                    if (!found)
-                        jl_array_ptr_set(matches, ins++, match);
-                }
-                jl_array_del_end((jl_array_t*)matches, jl_array_len(matches) - ins);
-            }
-            jl_array_ptr_1d_push(_jl_debug_method_invalidation, matches);
+        ptrhash_put(visited, caller, (void*)(((char*)HT_NOTFOUND) + valid + 1));
+        //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)caller);
+        //ios_puts(valid ? "valid\n" : "INVALID\n", ios_stderr);
+        // HT_NOTFOUND: valid (no invalid edges)
+        // HT_NOTFOUND + 1: invalid
+        // HT_NOTFOUND + 2: need to scan
+        // HT_NOTFOUND + 3 + depth: in-progress
     }
-    JL_GC_POP();
 }
+
+// Propagate the result of cycle-resolution to all edges (recursively)
+static void mark_edges_in_worklist(jl_array_t *edges, int idx, htable_t *visited, int found) JL_NOTSAFEPOINT
+{
+    jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, idx * 2);
+    int oldfound = (char*)ptrhash_get(visited, caller) - (char*)HT_NOTFOUND;
+    if (oldfound < 3)
+        return; // not in-progress
+    if (!found)
+        ptrhash_remove(visited, (void*)caller);
+    else
+        ptrhash_put(visited, (void*)caller, (void*)((char*)HT_NOTFOUND + 1 + found));
+    jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, idx * 2 + 1);
+    assert(jl_typeis((jl_value_t*)callee_ids, jl_array_int32_type));
+    int32_t *idxs = (int32_t*)jl_array_data(callee_ids);
+    size_t i, n = jl_array_len(callee_ids);
+    for (i = idxs[0] + 1; i < n; i++) {
+        int32_t idx = idxs[i];
+        mark_edges_in_worklist(edges, idx, visited, found);
+    }
+}
+
+
+// Visit the entire call graph, starting from edges[idx] to determine if that method is valid
+static int jl_verify_graph_edge(jl_array_t *edges, int idx, htable_t *visited, int depth) JL_NOTSAFEPOINT
+{
+    jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, idx * 2);
+    assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method));
+    int found = (char*)ptrhash_get(visited, (void*)caller) - (char*)HT_NOTFOUND;
+    if (found == 0)
+        return 1; // valid
+    if (found == 1)
+        return 0; // invalid
+    if (found != 2)
+        return found - 1; // depth
+    found = 0;
+    ptrhash_put(visited, (void*)caller, (void*)((char*)HT_NOTFOUND + 3 + depth)); // change 2 to in-progress at depth
+    jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, idx * 2 + 1);
+    assert(jl_typeis((jl_value_t*)callee_ids, jl_array_int32_type));
+    int32_t *idxs = (int32_t*)jl_array_data(callee_ids);
+    int cycle = 0;
+    size_t i, n = jl_array_len(callee_ids);
+    for (i = idxs[0] + 1; i < n; i++) {
+        int32_t idx = idxs[i];
+        int child_found = jl_verify_graph_edge(edges, idx, visited, depth + 1);
+        if (child_found == 0) {
+            found = 1;
+            break;
+        }
+        else if (child_found >= 2 && child_found - 2 < cycle) {
+            // record that the cycle will resolve at depth "cycle"
+            cycle = child_found - 2;
+            assert(cycle);
+        }
+    }
+    if (!found) {
+        if (cycle && cycle != depth)
+            return cycle + 2;
+        ptrhash_remove(visited, (void*)caller);
+    }
+    else { // found invalid
+        ptrhash_put(visited, (void*)caller, (void*)((char*)HT_NOTFOUND + 1 + found));
+    }
+    if (cycle) {
+        // If we are the top of the current cycle, now mark all other parts of
+        // our cycle by re-walking the backedges graph and marking all WIP
+        // items as found.
+        // Be careful to only re-walk as far as we had originally scanned above.
+        // Or if we found a backedge, also mark all of the other parts of the
+        // cycle as having a backedge.
+        n = i;
+        for (i = idxs[0] + 1; i < n; i++) {
+            int32_t idx = idxs[i];
+            mark_edges_in_worklist(edges, idx, visited, found);
+        }
+    }
+    return found ? 0 : 1;
+}
+
+// Visit all entries in edges, verify if they are valid
+static jl_array_t *jl_verify_graph(jl_array_t *edges, htable_t *visited)
+{
+    size_t i, n = jl_array_len(edges) / 2;
+    jl_array_t *valids = jl_alloc_array_1d(jl_array_uint8_type, n);
+    int8_t *valids_data = (int8_t*)jl_array_data(valids);
+    for (i = 0; i < n; i++) {
+        valids_data[i] = jl_verify_graph_edge(edges, i, visited, 1);
+    }
+    return valids;
+}
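Reviewer note: validation is now three passes — `jl_verify_edges` checks each external target once, `jl_verify_methods` folds those results into a per-caller verdict, and `jl_verify_graph` propagates invalidation through caller-to-caller edges, with the `visited` table's `HT_NOTFOUND + k` states doing double duty as memo and cycle detector. The sketch below is a deliberately simplified fixpoint formulation of that last pass (plain C, self-contained); it reaches the same verdicts, whereas the real code does it in a single DFS with depth bookkeeping and `mark_edges_in_worklist` to settle cycles without rescanning:

```c
#include <stdio.h>

#define N 4

// invalid[i] starts at 1 only for callers whose direct external targets
// failed edge verification; caller-to-caller edges then propagate
// invalidation until a fixpoint. Cycles with no bad member stay valid.
static int invalid[N];
static const int *children[N];   // -1 terminated callee lists

static void propagate(void)
{
    int changed = 1;
    while (changed) {
        changed = 0;
        for (int i = 0; i < N; i++) {
            if (invalid[i])
                continue;
            for (const int *c = children[i]; *c != -1; c++) {
                if (invalid[*c]) {
                    invalid[i] = 1;   // an invalid callee poisons the caller
                    changed = 1;
                    break;
                }
            }
        }
    }
}

int main(void)
{
    static const int c0[] = {1, -1}, c1[] = {0, 2, -1}, c2[] = {-1}, c3[] = {0, -1};
    children[0] = c0; children[1] = c1; children[2] = c2; children[3] = c3;
    invalid[2] = 1;                   // pretend caller 2 failed edge verification
    propagate();
    for (int i = 0; i < N; i++)
        printf("caller %d: %s\n", i, invalid[i] ? "INVALID" : "valid");
    return 0;                         // 0 and 1 form a cycle poisoned via 2
}
```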
 
// Restore backedges to external targets
// `edges` = [caller1, targets_indexes1, ...], the list of worklist-owned methods calling external methods.
// `ext_targets` is [invokesig1, callee1, matches1, ...], the global set of non-worklist callees of worklist-owned methods.
-static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_targets)
+static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_targets, jl_array_t *mi_list)
 {
-    // foreach(enable, ((edges[2i-1] => ext_targets[edges[2i] .* 3]) for i in 1:length(edges)÷2 if all(valids[edges[2i]])))
-    size_t i, l = jl_array_len(edges);
+    // determine which CodeInstance objects are still valid in our image
     size_t world = jl_atomic_load_acquire(&jl_world_counter);
-    jl_array_t *valids = NULL;
-    jl_value_t *targetidx = NULL;
-    JL_GC_PUSH2(&valids, &targetidx);
-    jl_verify_edges(ext_targets, &valids);
-    for (i = 0; i < l; i += 2) {
-        jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, i);
-        assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method));
-        jl_array_t *idxs_array = (jl_array_t*)jl_array_ptr_ref(edges, i + 1);
-        assert(jl_isa((jl_value_t*)idxs_array, jl_array_int32_type));
-        int32_t *idxs = (int32_t*)jl_array_data(idxs_array);
-        int valid = 1;
-        size_t j, idxbad = -1;
-        for (j = 0; valid && j < jl_array_len(idxs_array); j++) {
-            int32_t idx = idxs[j];
-            valid = jl_array_uint8_ref(valids, idx);
-            if (!valid)
-                idxbad = idx;
-        }
-        if (valid) {
-            // if this callee is still valid, add all the backedges
-            for (j = 0; j < jl_array_len(idxs_array); j++) {
-                int32_t idx = idxs[j];
-                jl_value_t *callee = jl_array_ptr_ref(ext_targets, idx * 3 + 1);
-                if (callee && jl_is_method_instance(callee)) {
-                    jl_value_t *invokesig = jl_array_ptr_ref(ext_targets, idx * 3);
-                    jl_method_instance_add_backedge((jl_method_instance_t*)callee, invokesig, caller);
-                }
-                else {
-                    jl_value_t *sig = callee == NULL ? jl_array_ptr_ref(ext_targets, idx * 3) : callee;
-                    jl_methtable_t *mt = jl_method_table_for(sig);
-                    // FIXME: rarely, `callee` has an unexpected `Union` signature,
-                    // see https://github.com/JuliaLang/julia/pull/43990#issuecomment-1030329344
-                    // Fix the issue and turn this back into an `assert((jl_value_t*)mt != jl_nothing)`
-                    // This workaround exposes us to (rare) 265-violations.
-                    if ((jl_value_t*)mt != jl_nothing)
-                        jl_method_table_add_backedge(mt, sig, (jl_value_t*)caller);
-                }
+    jl_array_t *valids = jl_verify_edges(ext_targets);
+    JL_GC_PUSH1(&valids);
+    htable_t visited;
+    htable_new(&visited, 0);
+    jl_verify_methods(edges, valids, &visited);
+    valids = jl_verify_graph(edges, &visited);
+    size_t i, l = jl_array_len(edges) / 2;
+
+    // next build a map from external_mis to their CodeInstance for insertion
+    if (mi_list == NULL) {
+        htable_reset(&visited, 0);
+    }
+    else {
+        size_t i, l = jl_array_len(mi_list);
+        htable_reset(&visited, l);
+        for (i = 0; i < l; i++) {
+            jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(mi_list, i);
+            ptrhash_put(&visited, (void*)ci->def, (void*)ci);
+        }
+    }
+
+    // next enable any applicable codes
+    for (i = 0; i < l; i++) {
+        jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, 2 * i);
+        int valid = jl_array_uint8_ref(valids, i);
+        if (!valid)
+            continue;
+        // if this callee is still valid, add all the backedges
+        jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, 2 * i + 1);
+        int32_t *idxs = (int32_t*)jl_array_data(callee_ids);
+        for (size_t j = 0; j < idxs[0]; j++) {
+            int32_t idx = idxs[j + 1];
+            jl_value_t *invokesig = jl_array_ptr_ref(ext_targets, idx * 3);
+            jl_value_t *callee = jl_array_ptr_ref(ext_targets, idx * 3 + 1);
+            if (callee && jl_is_method_instance(callee)) {
+                jl_method_instance_add_backedge((jl_method_instance_t*)callee, invokesig, caller);
             }
-            // then enable it
+            else {
+                jl_value_t *sig = callee == NULL ? invokesig : callee;
+                jl_methtable_t *mt = jl_method_table_for(sig);
+                // FIXME: rarely, `callee` has an unexpected `Union` signature,
+                // see https://github.com/JuliaLang/julia/pull/43990#issuecomment-1030329344
+                // Fix the issue and turn this back into an `assert((jl_value_t*)mt != jl_nothing)`
+                // This workaround exposes us to (rare) 265-violations.
+                if ((jl_value_t*)mt != jl_nothing)
+                    jl_method_table_add_backedge(mt, sig, (jl_value_t*)caller);
+            }
+        }
+        // then enable it
+        void *ci = ptrhash_get(&visited, (void*)caller);
+        if (ci != HT_NOTFOUND) {
+            // have some new external code to use
+            assert(jl_is_code_instance(ci));
+            jl_code_instance_t *codeinst = (jl_code_instance_t*)ci;
+            remove_code_instance_from_validation(codeinst); // mark it as handled
+            assert(codeinst->min_world >= world && codeinst->inferred);
+            codeinst->max_world = ~(size_t)0;
+            if (jl_rettype_inferred(caller, world, ~(size_t)0) == jl_nothing) {
+                jl_mi_cache_insert(caller, codeinst);
+            }
+        }
+        else {
+            // TODO: supposed to be only internal here, but may be internal or external (this will normally ignore external entries correctly, however)
             jl_code_instance_t *codeinst = caller->cache;
             while (codeinst) {
-                if (ptrhash_get(&new_code_instance_validate, codeinst) != HT_NOTFOUND && codeinst->min_world > 0)
+                if (ptrhash_get(&new_code_instance_validate, codeinst) != HT_NOTFOUND && codeinst->min_world > 0) {
                     codeinst->max_world = ~(size_t)0;
-                ptrhash_remove(&new_code_instance_validate, codeinst); // mark it as handled
+                }
+                remove_code_instance_from_validation(codeinst); // mark it as handled
                 codeinst = jl_atomic_load_relaxed(&codeinst->next);
             }
         }
+    }
+
+    // finally disable any invalid codes
+    // n.b. This may disable code we just enabled in the rare case there was a backedge added
+    // for an external method whose edges loop through this new module.
+    // TODO: should we pre-compute those edges and add them to the set at the boundary,
+    // so we don't attempt to add them, then immediately delete them again?
+    for (i = 0; i < l; i++) {
+        jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, 2 * i);
+        assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method));
+        int valid = jl_array_uint8_ref(valids, i);
+        if (valid)
+            continue;
+        void *ci = ptrhash_get(&visited, (void*)caller);
+        if (ci != HT_NOTFOUND) {
+            assert(jl_is_code_instance(ci));
+            remove_code_instance_from_validation((jl_code_instance_t*)ci); // mark it as handled
+        }
+        else {
             jl_code_instance_t *codeinst = caller->cache;
             while (codeinst) {
-                ptrhash_remove(&new_code_instance_validate, codeinst); // should be left invalid
+                remove_code_instance_from_validation(codeinst); // should be left invalid
                 codeinst = jl_atomic_load_relaxed(&codeinst->next);
             }
             invalidate_backedges(&remove_code_instance_from_validation, caller, world, "insert_backedges");
-            if (_jl_debug_method_invalidation) {
-                targetidx = jl_box_int32((int32_t)idxbad);
-                jl_array_ptr_1d_push(_jl_debug_method_invalidation, targetidx);
-                targetidx = jl_box_uint64(jl_worklist_key(serializer_worklist));
-                jl_array_ptr_1d_push(_jl_debug_method_invalidation, targetidx);
-            }
         }
     }
+
+    htable_free(&visited);
     JL_GC_POP();
 }
 
 static void validate_new_code_instances(void)
 {
+    size_t world = jl_atomic_load_acquire(&jl_world_counter);
     size_t i;
     for (i = 0; i < new_code_instance_validate.size; i += 2) {
         if (new_code_instance_validate.table[i+1] != HT_NOTFOUND) {
-            ((jl_code_instance_t*)new_code_instance_validate.table[i])->max_world = ~(size_t)0;
+            jl_code_instance_t *ci = (jl_code_instance_t*)new_code_instance_validate.table[i];
+            JL_GC_PROMISE_ROOTED(ci); // TODO: this needs a root (or restructuring to avoid it)
+            assert(ci->min_world >= world && ci->inferred);
+            ci->max_world = ~(size_t)0;
+            jl_method_instance_t *caller = ci->def;
+            if (jl_rettype_inferred(caller, world, ~(size_t)0) == jl_nothing) {
+                jl_mi_cache_insert(caller, ci);
+            }
+            //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)caller);
+            //ios_puts("FREE\n", ios_stderr);
         }
     }
 }
@@ -2903,18 +2929,12 @@ JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
             jl_symbol("BITS_PER_LIMB"))) / 8;
     }
 
+    // Save the inferred code from newly inferred, external methods
+    jl_array_t *mi_list = queue_external_mis(newly_inferred);
+
     int en = jl_gc_enable(0); // edges map is not gc-safe
     jl_array_t *extext_methods = jl_alloc_vec_any(0); // [method1, simplesig1, ...], worklist-owned "extending external" methods added to functions owned by modules outside the worklist
-    jl_array_t *ext_targets = jl_alloc_vec_any(0);    // [invokesig1, callee1, matches1, ...] non-worklist callees of worklist-owned methods
-    // ordinary dispatch: invokesig=NULL, callee is MethodInstance
-    // `invoke` dispatch: invokesig is signature, callee is MethodInstance
-    // abstract call: callee is signature
-    jl_array_t *edges = jl_alloc_vec_any(0); // [caller1, ext_targets_indexes1, ...] for worklist-owned methods calling external methods
-
-    int n_ext_mis = queue_external_mis(newly_inferred);
-
-    size_t i;
-    size_t len = jl_array_len(mod_array);
+    size_t i, len = jl_array_len(mod_array);
     for (i = 0; i < len; i++) {
         jl_module_t *m = (jl_module_t*)jl_array_ptr_ref(mod_array, i);
         assert(jl_is_module(m));
@@ -2925,10 +2945,14 @@ JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
     jl_collect_missing_backedges(jl_type_type_mt);
     jl_collect_methtable_from_mod(extext_methods, jl_nonfunction_mt);
     jl_collect_missing_backedges(jl_nonfunction_mt);
-
-    // jl_collect_extext_methods_from_mod and jl_collect_missing_backedges accumulate data in edges_map.
+    // jl_collect_extext_methods_from_mod and jl_collect_missing_backedges also accumulate data in edges_map.
     // Process this to extract `edges` and `ext_targets`.
-    jl_collect_backedges(edges, ext_targets);
+    jl_array_t *ext_targets = jl_alloc_vec_any(0); // [invokesig1, callee1, matches1, ...] non-worklist callees of worklist-owned methods
+    // ordinary dispatch: invokesig=NULL, callee is MethodInstance
+    // `invoke` dispatch: invokesig is signature, callee is MethodInstance
+    // abstract call: callee is signature
+    jl_array_t *edges = jl_alloc_vec_any(0); // [caller1, ext_targets_indexes1, ...] for worklist-owned methods calling external methods
+    jl_collect_edges(edges, ext_targets);
 
     jl_serializer_state s = {
         &f,
@@ -2937,19 +2961,18 @@ JL_DLLEXPORT int jl_save_incremental(const char *fname, jl_array_t *worklist)
     };
     jl_serialize_value(&s, worklist); // serialize module-owned items (those accessible from the bindings table)
     jl_serialize_value(&s, extext_methods); // serialize new worklist-owned methods for external functions
-    serialize_htable_keys(&s, &external_mis, n_ext_mis); // serialize external MethodInstances
-    // The next two allow us to restore backedges from external "unserialized" (stub-serialized) MethodInstances
-    // to the ones we serialize here
+    // The next three allow us to restore code instances, if still valid
+    jl_serialize_value(&s, mi_list);
     jl_serialize_value(&s, edges);
    jl_serialize_value(&s, ext_targets);
     jl_finalize_serializer(&s);
     serializer_worklist = NULL;
 
     jl_gc_enable(en);
-    htable_reset(&edges_map, 0);
-    htable_reset(&backref_table, 0);
-    htable_reset(&external_mis, 0);
+    htable_free(&edges_map);
+    htable_free(&backref_table);
+    htable_free(&external_mis);
     arraylist_free(&reinit_list);
 
     // Write the source-text for the dependent files
@@ -3331,15 +3354,11 @@ static jl_value_t *_jl_restore_incremental(ios_t *f, jl_array_t *mod_array)
     };
     jl_array_t *restored = (jl_array_t*)jl_deserialize_value(&s, (jl_value_t**)&restored);
     serializer_worklist = restored;
-    assert(jl_isa((jl_value_t*)restored, jl_array_any_type));
+    assert(jl_typeis((jl_value_t*)restored, jl_array_any_type));
 
     // See explanation in jl_save_incremental for variables of the same names
     jl_value_t *extext_methods = jl_deserialize_value(&s, &extext_methods);
-    int i, n_ext_mis = read_int32(s.s);
-    jl_array_t *mi_list = jl_alloc_vec_any(n_ext_mis); // reload MIs stored by serialize_htable_keys
-    jl_value_t **midata = (jl_value_t**)jl_array_data(mi_list);
-    for (i = 0; i < n_ext_mis; i++)
-        midata[i] = jl_deserialize_value(&s, &(midata[i]));
+    jl_value_t *mi_list = jl_deserialize_value(&s, &mi_list); // reload MIs stored by queue_external_mis
 
     jl_value_t *edges = jl_deserialize_value(&s, &edges);
     jl_value_t *ext_targets = jl_deserialize_value(&s, &ext_targets);
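Reviewer note: the restore side must read records in exactly the order the save side wrote them — worklist, extext_methods, then (new here) mi_list, edges, ext_targets; `serialize_htable_keys` is gone, so `mi_list` travels through `jl_serialize_value` like the rest. A tiny self-contained sketch of that positional contract (plain C; the buffer format is illustrative only, not the real serializer):

```c
#include <assert.h>
#include <stdio.h>
#include <string.h>

// Writer and reader share one record order; there is no per-record framing
// beyond position, exactly like the jl_serialize_value/jl_deserialize_value
// pairing above. Reordering either side corrupts everything after it.
static const char *order[] = {"worklist", "extext_methods", "mi_list", "edges", "ext_targets"};

int main(void)
{
    char buf[256] = {0};
    size_t pos = 0;
    for (int i = 0; i < 5; i++)              // "save": append records in order
        pos += sprintf(buf + pos, "%s;", order[i]);

    char *cursor = buf;                      // "restore": read them back in order
    for (int i = 0; i < 5; i++) {
        char *end = strchr(cursor, ';');
        *end = '\0';
        assert(strcmp(cursor, order[i]) == 0);
        printf("restored %s\n", cursor);
        cursor = end + 1;
    }
    return 0;
}
```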
@@ -3353,19 +3372,16 @@ static jl_value_t *_jl_restore_incremental(ios_t *f, jl_array_t *mod_array)
     jl_insert_methods((jl_array_t*)extext_methods); // hook up extension methods for external generic functions (needs to be after recache types)
     jl_recache_other(); // make all of the other objects identities correct (needs to be after insert methods)
     jl_copy_roots();    // copying new roots of external methods (must wait until recaching is complete)
-    // At this point, the novel specializations in mi_list reference the real method, but they haven't been cached in its specializations
-    jl_insert_method_instances(mi_list); // insert novel specializations
     htable_free(&uniquing_table);
     jl_array_t *init_order = jl_finalize_deserializer(&s, tracee_list); // done with f and s (needs to be after recache)
     if (init_order == NULL)
         init_order = (jl_array_t*)jl_an_empty_vec_any;
-    assert(jl_isa((jl_value_t*)init_order, jl_array_any_type));
+    assert(jl_typeis((jl_value_t*)init_order, jl_array_any_type));
 
-    JL_GC_PUSH4(&init_order, &restored, &edges, &ext_targets);
+    JL_GC_PUSH5(&init_order, &restored, &edges, &ext_targets, &mi_list);
     jl_gc_enable(en); // subtyping can allocate a lot, not valid before recache-other
-    jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)ext_targets); // restore external backedges (needs to be last)
-
+    jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)ext_targets, (jl_array_t*)mi_list); // restore external backedges (needs to be last)
 
     // check new CodeInstances and validate any that lack external backedges
     validate_new_code_instances();
diff --git a/src/gf.c b/src/gf.c
index 7330e4de4b275..c0d7f34aac826 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -222,8 +222,6 @@ JL_DLLEXPORT jl_code_instance_t* jl_new_codeinst(
     int32_t const_flags, size_t min_world, size_t max_world,
     uint32_t ipo_effects, uint32_t effects, jl_value_t *argescapes,
     uint8_t relocatability);
-JL_DLLEXPORT void jl_mi_cache_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT,
-                                     jl_code_instance_t *ci JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED);
 
 jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_args_t fptr) JL_GC_DISABLED
 {
@@ -438,7 +436,7 @@ JL_DLLEXPORT void jl_mi_cache_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMEN
         JL_LOCK(&mi->def.method->writelock);
     jl_code_instance_t *oldci = jl_atomic_load_relaxed(&mi->cache);
     jl_atomic_store_relaxed(&ci->next, oldci);
-    jl_gc_wb(ci, oldci); // likely older, but just being careful
+    jl_gc_wb(ci, oldci);
     jl_atomic_store_release(&mi->cache, ci);
     jl_gc_wb(mi, ci);
     if (jl_is_method(mi->def.method))
@@ -1435,6 +1433,7 @@ static void invalidate_method_instance(void (*f)(jl_code_instance_t*), jl_method
         jl_array_ptr_1d_push(_jl_debug_method_invalidation, boxeddepth);
         JL_GC_POP();
     }
+    //jl_static_show(JL_STDERR, (jl_value_t*)replaced);
     if (!jl_is_method(replaced->def.method))
         return; // shouldn't happen, but better to be safe
     JL_LOCK(&replaced->def.method->writelock);
@@ -1471,6 +1470,7 @@ void invalidate_backedges(void (*f)(jl_code_instance_t*), jl_method_instance_t *
 {
     JL_LOCK(&replaced_mi->def.method->writelock);
     jl_array_t *backedges = replaced_mi->backedges;
+    //jl_static_show(JL_STDERR, (jl_value_t*)replaced_mi);
     if (backedges) {
         // invalidate callers (if any)
         replaced_mi->backedges = NULL;
diff --git a/src/julia.h b/src/julia.h
index 644ce0dbd78ae..5ecf00faa674a 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -1839,6 +1839,7 @@ JL_DLLEXPORT jl_value_t *jl_compress_argnames(jl_array_t *syms);
 JL_DLLEXPORT jl_array_t *jl_uncompress_argnames(jl_value_t *syms);
 JL_DLLEXPORT jl_value_t *jl_uncompress_argname_n(jl_value_t *syms, size_t i);
 
+
 JL_DLLEXPORT int jl_is_operator(char *sym);
 JL_DLLEXPORT int jl_is_unary_operator(char *sym);
 JL_DLLEXPORT int jl_is_unary_and_binary_operator(char *sym);
diff --git a/src/julia_internal.h b/src/julia_internal.h
index 635a14b6a2f26..344061b09a9ac 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -714,15 +714,18 @@ jl_method_instance_t *jl_method_lookup(jl_value_t **args, size_t nargs, size_t w
 jl_value_t *jl_gf_invoke_by_method(jl_method_t *method, jl_value_t *gf, jl_value_t **args, size_t nargs);
 jl_value_t *jl_gf_invoke(jl_value_t *types, jl_value_t *f, jl_value_t **args, size_t nargs);
+JL_DLLEXPORT jl_value_t *jl_gf_invoke_lookup_worlds(jl_value_t *types, jl_value_t *mt, size_t world, size_t *min_world, size_t *max_world);
 JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, jl_value_t *mt, int lim, int include_ambiguous,
                                              size_t world, size_t *min_valid, size_t *max_valid, int *ambig);
+
 JL_DLLEXPORT jl_datatype_t *jl_first_argument_datatype(jl_value_t *argtypes JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
 JL_DLLEXPORT jl_value_t *jl_argument_datatype(jl_value_t *argt JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
 JL_DLLEXPORT jl_methtable_t *jl_method_table_for(
     jl_value_t *argtypes JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
 JL_DLLEXPORT jl_methtable_t *jl_method_get_table(
-    jl_method_t *method) JL_NOTSAFEPOINT;
+    jl_method_t *method JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT;
 jl_methtable_t *jl_argument_method_table(jl_value_t *argt JL_PROPAGATES_ROOT);
 
 JL_DLLEXPORT int jl_pointer_egal(jl_value_t *t);
@@ -956,6 +959,8 @@ JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(
 jl_method_instance_t *jl_specializations_get_or_insert(jl_method_instance_t *mi_ins);
 JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_method_instance_t *caller);
 JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_value_t *caller);
+JL_DLLEXPORT void jl_mi_cache_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT,
+                                     jl_code_instance_t *ci JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED);
 
 uint32_t jl_module_next_counter(jl_module_t *m) JL_NOTSAFEPOINT;
 jl_tupletype_t *arg_type_tuple(jl_value_t *arg1, jl_value_t **args, size_t nargs);
diff --git a/src/opaque_closure.c b/src/opaque_closure.c
index d34989181b7ad..7a01d254ce71a 100644
--- a/src/opaque_closure.c
+++ b/src/opaque_closure.c
@@ -110,9 +110,6 @@ JL_DLLEXPORT jl_code_instance_t* jl_new_codeinst(
     uint32_t ipo_effects, uint32_t effects, jl_value_t *argescapes,
     uint8_t relocatability);
 
-JL_DLLEXPORT void jl_mi_cache_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT,
-                                     jl_code_instance_t *ci JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED);
-
 JL_DLLEXPORT jl_opaque_closure_t *jl_new_opaque_closure_from_code_info(jl_tupletype_t *argt, jl_value_t *rt_lb,
     jl_value_t *rt_ub, jl_module_t *mod, jl_code_info_t *ci, int lineno, jl_value_t *file, int nargs, int isva, jl_value_t *env)
 {
diff --git a/test/precompile.jl b/test/precompile.jl
index f6936197917a8..06f69525fde32 100644
--- a/test/precompile.jl
+++ b/test/precompile.jl
@@ -642,16 +642,11 @@ precompile_test_harness("code caching") do dir
     msize = which(size, (Vector{<:Any},))
     hasspec = false
     for i = 1:length(msize.specializations)
-        if isassigned(msize.specializations, i)
-            mi = msize.specializations[i]
-            if isa(mi, Core.MethodInstance)
-                tt = Base.unwrap_unionall(mi.specTypes)
-                if tt.parameters[2] == Vector{Cacheb8321416e8a3e2f1.X}
-                    if isdefined(mi, :cache) && isa(mi.cache, Core.CodeInstance) && mi.cache.max_world == typemax(UInt) && mi.cache.inferred !== nothing
-                        hasspec = true
-                        break
-                    end
-                end
+        mi = msize.specializations[i]
+        if isa(mi, Core.MethodInstance) && mi.specTypes == Tuple{typeof(size),Vector{Cacheb8321416e8a3e2f1.X}}
+            if isdefined(mi, :cache) && isa(mi.cache, Core.CodeInstance) && mi.cache.max_world == typemax(UInt) && mi.cache.inferred !== nothing
+                hasspec = true
+                break
             end
         end
     end
@@ -671,7 +666,7 @@ precompile_test_harness("code caching") do dir
     # Check that internal methods and their roots are accounted appropriately
     minternal = which(M.getelsize, (Vector,))
     mi = minternal.specializations[1]
-    @test Base.unwrap_unionall(mi.specTypes).parameters[2] == Vector{Int32}
+    @test mi.specTypes == Tuple{typeof(M.getelsize),Vector{Int32}}
     ci = mi.cache
     @test ci.relocatability == 1
     @test ci.inferred !== nothing
@@ -787,7 +782,7 @@ precompile_test_harness("code caching") do dir
        end
     end
 
-    # Invalidations (this test is adapted from from SnoopCompile)
+    # Invalidations (this test is adapted from SnoopCompile)
     function hasvalid(mi, world)
         isdefined(mi, :cache) || return false
         ci = mi.cache
@@ -911,13 +906,13 @@ precompile_test_harness("code caching") do dir
         @test mi.specTypes.parameters[end] === Integer ? !hv : hv
     end
 
-    tagbad = invalidations[idx+1]
-    buildid = invalidations[idx+2]
-    @test isa(buildid, UInt64)
-    j = findfirst(==(tagbad), invalidations)
-    @test invalidations[j+1] == buildid
-    @test isa(invalidations[j-2], Type)
-    @test invalidations[j-1] == "insert_backedges_callee"
+    #tagbad = invalidations[idx+1]
+    #buildid = invalidations[idx+2]
+    #@test isa(buildid, UInt64)
+    #j = findfirst(==(tagbad), invalidations)
+    #@test invalidations[j+1] == buildid
+    #@test isa(invalidations[j-2], Type)
+    #@test invalidations[j-1] == "insert_backedges_callee"
 
     m = only(methods(MB.map_nbits))
     @test !hasvalid(m.specializations[1], world+1) # insert_backedges invalidations also trigger their backedges
@@ -931,6 +926,7 @@ precompile_test_harness("invoke") do dir
               module $InvokeModule
               export f, g, h, q, fnc, gnc, hnc, qnc   # nc variants do not infer to a Const
               export f44320, g44320
+              export getlast
               # f is for testing invoke that occurs within a dependency
               f(x::Real) = 0
               f(x::Int) = x < 5 ? 1 : invoke(f, Tuple{Real}, x)
@@ -954,6 +950,16 @@ precompile_test_harness("invoke") do dir
               f44320(::Any) = 2
               g44320() = invoke(f44320, Tuple{Any}, 0)
               g44320()
+
+              # Adding new specializations should not invalidate `invoke`s
+              function getlast(itr)
+                  x = nothing
+                  for y in itr
+                      x = y
+                  end
+                  return x
+              end
+              getlast(a::AbstractArray) = invoke(getlast, Tuple{Any}, a)
               end
               """)
     write(joinpath(dir, "$CallerModule.jl"),
@@ -981,6 +987,8 @@ precompile_test_harness("invoke") do dir
               # Issue #44320
               f44320(::Real) = 3
 
+              call_getlast(x) = getlast(x)
+
               # force precompilation
               begin
                   Base.Experimental.@force_compile
@@ -996,6 +1004,7 @@ precompile_test_harness("invoke") do dir
                   callqnci(3)
                   internal(3)
                   internalnc(3)
+                  call_getlast([1,2,3])
               end
 
               # Now that we've precompiled, invalidate with a new method that overrides the `invoke` dispatch
@@ -1007,6 +1016,9 @@ precompile_test_harness("invoke") do dir
               end
               """)
     Base.compilecache(Base.PkgId(string(CallerModule)))
+    @eval using $InvokeModule: $InvokeModule
+    MI = getfield(@__MODULE__, InvokeModule)
+    @eval $MI.getlast(a::UnitRange) = a.stop
     @eval using $CallerModule
     M = getfield(@__MODULE__, CallerModule)
@@ -1060,6 +1072,9 @@ precompile_test_harness("invoke") do dir
     m = only(methods(M.g44320))
     @test m.specializations[1].cache.max_world == typemax(UInt)
 
+    m = which(MI.getlast, (Any,))
+    @test m.specializations[1].cache.max_world == typemax(UInt)
+
     # Precompile specific methods for arbitrary arg types
     invokeme(x) = 1
     invokeme(::Int) = 2