Add mechanism to disable creating new worlds
As a performance optimization, when you are certain that all remaining code will
run in a fixed world (e.g. that it will not even eval a new closure or
comprehension), such as when all code is defined in a system image and not being
used interactively, you can call:

    Base.Experimental.disable_new_worlds()

which enables performance optimizations by avoiding the tracking of backedges
and invalidations.
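
For illustration, a minimal sketch of the intended call site (the `main` entry
point and `serve_requests` workload here are hypothetical, not part of this
commit):

    # All methods used below are assumed to be compiled into the system image,
    # and nothing past this point evals new code.
    function main()
        Base.Experimental.disable_new_worlds()  # freeze the method world
        serve_requests()  # hypothetical fixed workload
    end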
vtjnash committed Nov 27, 2024
1 parent 4280731 commit 7f39d37
Showing 6 changed files with 240 additions and 130 deletions.
9 changes: 9 additions & 0 deletions base/experimental.jl
@@ -494,4 +494,13 @@ function entrypoint(@nospecialize(argt::Type))
 nothing
 end

+"""
+    Base.Experimental.disable_new_worlds()
+
+Mark that no new worlds (method additions, deletions, etc.) are permitted to be created at
+any future time, allowing for lower latencies for some operations and slightly lower memory
+usage, by eliminating the tracking of those possible invalidations.
+"""
+disable_new_worlds() = ccall(:jl_disable_new_worlds, Cvoid, ())
+
 end
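
For context (not part of the diff): once disabled, any later method definition
is expected to hit the error added to `jl_method_table_insert` below. A sketch
of the observable behavior:

    julia> f() = 1;

    julia> Base.Experimental.disable_new_worlds()

    julia> g() = 2
    ERROR: Method changes have been disabled via a call to disable_new_worlds.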
152 changes: 104 additions & 48 deletions src/gf.c
@@ -24,6 +24,7 @@
 extern "C" {
 #endif

+_Atomic(int) allow_new_worlds = 1;
 JL_DLLEXPORT _Atomic(size_t) jl_world_counter = 1; // uses atomic acquire/release
 jl_mutex_t world_counter_lock;
 JL_DLLEXPORT size_t jl_get_world_counter(void) JL_NOTSAFEPOINT
@@ -1819,76 +1820,84 @@ static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_w
 // add a backedge from callee to caller
 JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_code_instance_t *caller)
 {
+    if (!jl_atomic_load_relaxed(&allow_new_worlds))
+        return;
     if (invokesig == jl_nothing)
         invokesig = NULL; // julia uses `nothing` but C uses NULL (#undef)
     assert(jl_is_method_instance(callee));
     assert(jl_is_code_instance(caller));
     assert(invokesig == NULL || jl_is_type(invokesig));
     JL_LOCK(&callee->def.method->writelock);
-    int found = 0;
-    // TODO: use jl_cache_type_(invokesig) like cache_method does to save memory
-    if (!callee->backedges) {
-        // lazy-init the backedges array
-        callee->backedges = jl_alloc_vec_any(0);
-        jl_gc_wb(callee, callee->backedges);
-    }
-    else {
-        size_t i = 0, l = jl_array_nrows(callee->backedges);
-        for (i = 0; i < l; i++) {
-            // optimized version of while (i < l) i = get_next_edge(callee->backedges, i, &invokeTypes, &mi);
-            jl_value_t *mi = jl_array_ptr_ref(callee->backedges, i);
-            if (mi != (jl_value_t*)caller)
-                continue;
-            jl_value_t *invokeTypes = i > 0 ? jl_array_ptr_ref(callee->backedges, i - 1) : NULL;
-            if (invokeTypes && jl_is_method_instance(invokeTypes))
-                invokeTypes = NULL;
-            if ((invokesig == NULL && invokeTypes == NULL) ||
-                (invokesig && invokeTypes && jl_types_equal(invokesig, invokeTypes))) {
-                found = 1;
-                break;
+    if (jl_atomic_load_relaxed(&allow_new_worlds)) {
+        int found = 0;
+        // TODO: use jl_cache_type_(invokesig) like cache_method does to save memory
+        if (!callee->backedges) {
+            // lazy-init the backedges array
+            callee->backedges = jl_alloc_vec_any(0);
+            jl_gc_wb(callee, callee->backedges);
+        }
+        else {
+            size_t i = 0, l = jl_array_nrows(callee->backedges);
+            for (i = 0; i < l; i++) {
+                // optimized version of while (i < l) i = get_next_edge(callee->backedges, i, &invokeTypes, &mi);
+                jl_value_t *mi = jl_array_ptr_ref(callee->backedges, i);
+                if (mi != (jl_value_t*)caller)
+                    continue;
+                jl_value_t *invokeTypes = i > 0 ? jl_array_ptr_ref(callee->backedges, i - 1) : NULL;
+                if (invokeTypes && jl_is_method_instance(invokeTypes))
+                    invokeTypes = NULL;
+                if ((invokesig == NULL && invokeTypes == NULL) ||
+                    (invokesig && invokeTypes && jl_types_equal(invokesig, invokeTypes))) {
+                    found = 1;
+                    break;
+                }
             }
         }
+        if (!found)
+            push_edge(callee->backedges, invokesig, caller);
     }
-    if (!found)
-        push_edge(callee->backedges, invokesig, caller);
     JL_UNLOCK(&callee->def.method->writelock);
 }
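
As a reading aid, an illustrative Julia model (not the runtime's actual data
structure) of the flattened backedge list scanned above: an optional invoke
signature sits directly before its caller entry, which is what the loop and
`get_next_edge` decode.

    function push_edge!(edges::Vector{Any}, invokesig, caller)
        invokesig === nothing || push!(edges, invokesig)
        push!(edges, caller)
        return edges
    end
    edges = Any[]
    push_edge!(edges, nothing, "caller A")                     # ordinary call edge
    push_edge!(edges, Tuple{typeof(sin),Float64}, "caller B")  # `invoke` edge
    # edges == Any["caller A", Tuple{typeof(sin),Float64}, "caller B"]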

 // add a backedge from a non-existent signature to caller
 JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_code_instance_t *caller)
 {
     assert(jl_is_code_instance(caller));
+    if (!jl_atomic_load_relaxed(&allow_new_worlds))
+        return;
     JL_LOCK(&mt->writelock);
-    if (!mt->backedges) {
-        // lazy-init the backedges array
-        mt->backedges = jl_alloc_vec_any(2);
-        jl_gc_wb(mt, mt->backedges);
-        jl_array_ptr_set(mt->backedges, 0, typ);
-        jl_array_ptr_set(mt->backedges, 1, caller);
-    }
-    else {
-        // check if the edge is already present and avoid adding a duplicate
-        size_t i, l = jl_array_nrows(mt->backedges);
-        for (i = 1; i < l; i += 2) {
-            if (jl_array_ptr_ref(mt->backedges, i) == (jl_value_t*)caller) {
-                if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
-                    JL_UNLOCK(&mt->writelock);
-                    return;
+    if (jl_atomic_load_relaxed(&allow_new_worlds)) {
+        if (!mt->backedges) {
+            // lazy-init the backedges array
+            mt->backedges = jl_alloc_vec_any(2);
+            jl_gc_wb(mt, mt->backedges);
+            jl_array_ptr_set(mt->backedges, 0, typ);
+            jl_array_ptr_set(mt->backedges, 1, caller);
+        }
+        else {
+            // check if the edge is already present and avoid adding a duplicate
+            size_t i, l = jl_array_nrows(mt->backedges);
+            for (i = 1; i < l; i += 2) {
+                if (jl_array_ptr_ref(mt->backedges, i) == (jl_value_t*)caller) {
+                    if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
+                        JL_UNLOCK(&mt->writelock);
+                        return;
+                    }
                 }
             }
-        }
-        // reuse an already cached instance of this type, if possible
-        // TODO: use jl_cache_type_(tt) like cache_method does, instead of this linear scan?
-        for (i = 1; i < l; i += 2) {
-            if (jl_array_ptr_ref(mt->backedges, i) != (jl_value_t*)caller) {
-                if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
-                    typ = jl_array_ptr_ref(mt->backedges, i - 1);
-                    break;
+            // reuse an already cached instance of this type, if possible
+            // TODO: use jl_cache_type_(tt) like cache_method does, instead of this linear scan?
+            for (i = 1; i < l; i += 2) {
+                if (jl_array_ptr_ref(mt->backedges, i) != (jl_value_t*)caller) {
+                    if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) {
+                        typ = jl_array_ptr_ref(mt->backedges, i - 1);
+                        break;
+                    }
                 }
             }
+            jl_array_ptr_1d_push(mt->backedges, typ);
+            jl_array_ptr_1d_push(mt->backedges, (jl_value_t*)caller);
         }
-        jl_array_ptr_1d_push(mt->backedges, typ);
-        jl_array_ptr_1d_push(mt->backedges, (jl_value_t*)caller);
     }
     JL_UNLOCK(&mt->writelock);
 }
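
Similarly, an illustrative Julia model (again, not the runtime's actual data
structure) of the pair layout handled above: the table-level list alternates
(typ, caller) entries, exact duplicates are skipped, and an existing equal type
object is reused to save memory.

    function add_mt_edge!(edges::Vector{Any}, typ, caller)
        for i in 2:2:length(edges)
            edges[i] === caller && edges[i-1] == typ && return edges  # duplicate
        end
        for i in 2:2:length(edges)
            if edges[i] !== caller && edges[i-1] == typ
                typ = edges[i-1]  # reuse the already-stored equal type object
                break
            end
        end
        push!(edges, typ, caller)
        return edges
    end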
@@ -2024,10 +2033,55 @@ static void jl_method_table_invalidate(jl_methtable_t *mt, jl_method_t *replaced
     }
 }

+static int erase_method_backedges(jl_typemap_entry_t *def, void *closure)
+{
+    jl_method_t *method = def->func.method;
+    JL_LOCK(&method->writelock);
+    jl_value_t *specializations = jl_atomic_load_relaxed(&method->specializations);
+    if (jl_is_svec(specializations)) {
+        size_t i, l = jl_svec_len(specializations);
+        for (i = 0; i < l; i++) {
+            jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i);
+            if ((jl_value_t*)mi != jl_nothing) {
+                mi->backedges = NULL;
+            }
+        }
+    }
+    else {
+        jl_method_instance_t *mi = (jl_method_instance_t*)specializations;
+        mi->backedges = NULL;
+    }
+    JL_UNLOCK(&method->writelock);
+    return 1;
+}
+
+static int erase_all_backedges(jl_methtable_t *mt, void *env)
+{
+    // removes all method caches
+    // this might not be entirely safe (GC or MT), thus we only do it very early in bootstrapping
+    JL_LOCK(&mt->writelock);
+    mt->backedges = NULL;
+    JL_UNLOCK(&mt->writelock);
+    jl_typemap_visitor(jl_atomic_load_relaxed(&mt->defs), erase_method_backedges, env);
+    return 1;
+}
+
+JL_DLLEXPORT void jl_disable_new_worlds(void)
+{
+    if (jl_generating_output())
+        jl_error("Disabling Method changes is not possible when generating output.");
+    JL_LOCK(&world_counter_lock);
+    jl_atomic_store_relaxed(&allow_new_worlds, 0);
+    JL_UNLOCK(&world_counter_lock);
+    jl_foreach_reachable_mtable(erase_all_backedges, (void*)NULL);
+}
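
Once the latch is set, the world counter can no longer advance, which a caller
can observe directly. A sketch of the intended effect (assuming the remaining
workload genuinely defines no methods and evals no new code):

    Base.Experimental.disable_new_worlds()
    w = Base.get_world_counter()
    # ... run the fixed workload ...
    @assert Base.get_world_counter() == w  # no new worlds were created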

 JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *method)
 {
     jl_typemap_entry_t *methodentry = do_typemap_search(mt, method);
     JL_LOCK(&world_counter_lock);
+    if (!jl_atomic_load_relaxed(&allow_new_worlds))
+        jl_error("Method changes have been disabled via a call to disable_new_worlds.");
     JL_LOCK(&mt->writelock);
     // Narrow the world age on the method to make it uncallable
     size_t world = jl_atomic_load_relaxed(&jl_world_counter);
@@ -2341,6 +2395,8 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
     jl_typemap_entry_t *newentry = jl_method_table_add(mt, method, simpletype);
     JL_GC_PUSH1(&newentry);
     JL_LOCK(&world_counter_lock);
+    if (!jl_atomic_load_relaxed(&allow_new_worlds))
+        jl_error("Method changes have been disabled via a call to disable_new_worlds.");
     size_t world = jl_atomic_load_relaxed(&jl_world_counter) + 1;
     jl_atomic_store_relaxed(&method->primary_world, world);
     jl_atomic_store_relaxed(&method->deleted_world, ~(size_t)0);
32 changes: 23 additions & 9 deletions src/staticdata.c
@@ -89,6 +89,7 @@ External links:
 #include "julia_assert.h"

 static const size_t WORLD_AGE_REVALIDATION_SENTINEL = 0x1;
+size_t jl_require_world = ~(size_t)0;

 #include "staticdata_utils.c"
 #include "precompile_utils.c"
@@ -2678,7 +2679,6 @@ jl_genericmemory_t *jl_global_roots_list;
 jl_genericmemory_t *jl_global_roots_keyset;
 jl_mutex_t global_roots_lock;
 extern jl_mutex_t world_counter_lock;
-extern size_t jl_require_world;

 jl_mutex_t precompile_field_replace_lock;
 jl_svec_t *precompile_field_replace JL_GLOBALLY_ROOTED;
@@ -4044,16 +4044,30 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i
     // Add roots to methods
     jl_copy_roots(method_roots_list, jl_worklist_key((jl_array_t*)restored));
     // Insert method extensions and handle edges
+    int new_methods = jl_array_nrows(extext_methods) > 0;
+    if (!new_methods) {
+        size_t i, l = jl_array_nrows(internal_methods);
+        for (i = 0; i < l; i++) {
+            jl_value_t *obj = jl_array_ptr_ref(internal_methods, i);
+            if (jl_is_method(obj)) {
+                new_methods = 1;
+                break;
+            }
+        }
+    }
     JL_LOCK(&world_counter_lock);
     // allocate a world for the new methods, and insert them there, invalidating content as needed
-    size_t world = jl_atomic_load_relaxed(&jl_world_counter) + 1;
-    jl_activate_methods(extext_methods, internal_methods, world);
+    size_t world = jl_atomic_load_relaxed(&jl_world_counter);
+    if (new_methods)
+        world += 1;
+    jl_activate_methods(extext_methods, internal_methods, world, pkgname);
     // TODO: inject new_ext_cis into caches here, so the system can see them immediately as potential candidates (before validation)
     // allow users to start running in this updated world
-    jl_atomic_store_release(&jl_world_counter, world);
+    if (new_methods)
+        jl_atomic_store_release(&jl_world_counter, world);
     // now permit more methods to be added again
     JL_UNLOCK(&world_counter_lock);
     // but one of those immediate users is going to be our cache insertions
     jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)new_ext_cis); // restore existing caches (needs to be last)
     // reinit ccallables
     jl_reinit_ccallable(&ccallable_list, base, pkgimage_handle);
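
The practical upshot of this hunk is that loading a package image which adds no
methods no longer consumes a world. A hedged sketch (DataOnlyPkg is
hypothetical and must genuinely define no new methods for this to hold):

    w0 = Base.get_world_counter()
    using DataOnlyPkg
    Base.get_world_counter() == w0  # no world bump when no methods were added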
(Diffs for the remaining 3 changed files are not shown.)