diff --git a/benchmark/vm_ivar_memoize.yml b/benchmark/vm_ivar_memoize.yml
new file mode 100644
index 00000000000000..90f6b07f05de41
--- /dev/null
+++ b/benchmark/vm_ivar_memoize.yml
@@ -0,0 +1,85 @@
+prelude: |
+  IVARS = 60
+  class Record
+    def initialize(offset = false)
+      @offset = 1 if offset
+      @first = 0
+      IVARS.times do |i|
+        instance_variable_set("@ivar_#{i}", i)
+      end
+    end
+
+    def first
+      @first
+    end
+
+    def lazy_set
+      @lazy_set ||= 123
+    end
+
+    def undef
+      @undef
+    end
+  end
+
+  Record.new # Need one alloc to right size
+
+  BASE = Record.new
+  LAZY = Record.new
+  LAZY.lazy_set
+
+  class Miss < Record
+    @first = 0
+    IVARS.times do |i|
+      instance_variable_set("@i_#{i}", i)
+    end
+  end
+
+  Miss.new # Need one alloc to right size
+  MISS = Miss.new
+
+  DIVERGENT = Record.new(true)
+
+benchmark:
+  vm_ivar_stable_shape: |
+    BASE.first
+    BASE.first
+    BASE.first
+    BASE.first
+    BASE.first
+    BASE.first
+  vm_ivar_memoize_unstable_shape: |
+    BASE.first
+    LAZY.first
+    BASE.first
+    LAZY.first
+    BASE.first
+    LAZY.first
+  vm_ivar_memoize_unstable_shape_miss: |
+    BASE.first
+    MISS.first
+    BASE.first
+    MISS.first
+    BASE.first
+    MISS.first
+  vm_ivar_unstable_undef: |
+    BASE.undef
+    LAZY.undef
+    BASE.undef
+    LAZY.undef
+    BASE.undef
+    LAZY.undef
+  vm_ivar_divergent_shape: |
+    BASE.first
+    DIVERGENT.first
+    BASE.first
+    DIVERGENT.first
+    BASE.first
+    DIVERGENT.first
+  vm_ivar_divergent_shape_imbalanced: |
+    BASE.first
+    DIVERGENT.first
+    DIVERGENT.first
+    DIVERGENT.first
+    DIVERGENT.first
+    DIVERGENT.first
diff --git a/shape.c b/shape.c
index 3b0d2734210eb5..9b5181acbafe77 100644
--- a/shape.c
+++ b/shape.c
@@ -436,12 +436,16 @@ rb_shape_transition_shape_capa(rb_shape_t* shape)
 // Same as rb_shape_get_iv_index, but uses a provided valid shape id and index
 // to return a result faster if branches of the shape tree are closely related.
 bool
-rb_shape_get_iv_index_with_hint(rb_shape_t * shape, ID id, attr_index_t *value, shape_id_t shape_id_hint, attr_index_t index_hint)
+rb_shape_get_iv_index_with_hint(shape_id_t shape_id, ID id, attr_index_t *value, shape_id_t *shape_id_hint)
 {
-    if (shape_id_hint == INVALID_SHAPE_ID)
+    attr_index_t index_hint = *value;
+    rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
+    if (*shape_id_hint == INVALID_SHAPE_ID) {
+        *shape_id_hint = shape_id;
         return rb_shape_get_iv_index(shape, id, value);
+    }
 
-    rb_shape_t * shape_hint = rb_shape_get_shape_by_id(shape_id_hint);
+    rb_shape_t * shape_hint = rb_shape_get_shape_by_id(*shape_id_hint);
 
     while (shape->next_iv_index > index_hint) {
         while (shape_hint->next_iv_index > shape->next_iv_index) {
@@ -450,12 +454,9 @@ rb_shape_get_iv_index_with_hint(rb_shape_t * shape, ID id, attr_index_t *value,
 
         if (shape_hint == shape) {
             // We've found a common ancestor so use the index hint
-            if (index_hint == (attr_index_t)-1) {
-                return false;
-            } else {
-                *value = index_hint;
-                return true;
-            }
+            *value = index_hint;
+            *shape_id_hint = rb_shape_id(shape);
+            return true;
         }
         if (shape->edge_name == id) {
             // We found the matching id before a common ancestor
@@ -467,6 +468,7 @@ rb_shape_get_iv_index_with_hint(rb_shape_t * shape, ID id, attr_index_t *value,
         shape = rb_shape_get_parent(shape);
     }
 
+    *shape_id_hint = shape_id;
     return rb_shape_get_iv_index(shape, id, value);
 }
 
diff --git a/shape.h b/shape.h
index 67f8f21d420ded..49cd814bce15be 100644
--- a/shape.h
+++ b/shape.h
@@ -144,7 +144,7 @@ rb_shape_t* rb_shape_get_shape_by_id(shape_id_t shape_id);
 shape_id_t rb_shape_get_shape_id(VALUE obj);
 rb_shape_t * rb_shape_get_next_iv_shape(rb_shape_t * shape, ID id);
 bool rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t * value);
-bool rb_shape_get_iv_index_with_hint(rb_shape_t * shape, ID id, attr_index_t * value, shape_id_t shape_id_hint, attr_index_t index_hint);
+bool rb_shape_get_iv_index_with_hint(shape_id_t shape_id, ID id, attr_index_t * value, shape_id_t *shape_id_hint);
 bool rb_shape_obj_too_complex(VALUE obj);
 void rb_shape_set_shape(VALUE obj, rb_shape_t* shape);
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index ee8662f3ab59f7..dd3b1eb3fdbb6f 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -1286,22 +1286,28 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
     }
 #endif
 
-    rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
-
     if (shape_id == OBJ_TOO_COMPLEX_SHAPE_ID) {
         if (!st_lookup(ROBJECT_IV_HASH(obj), id, &val)) {
             val = default_value;
         }
     }
     else {
-        if (rb_shape_get_iv_index_with_hint(shape, id, &index, cached_id, index)) {
+        shape_id_t previous_cached_id = cached_id;
+        if (rb_shape_get_iv_index_with_hint(shape_id, id, &index, &cached_id)) {
             // This fills in the cache with the shared cache object.
             // "ent" is the shared cache object
-            fill_ivar_cache(iseq, ic, cc, is_attr, index, shape_id);
+            if (cached_id != previous_cached_id) {
+                fill_ivar_cache(iseq, ic, cc, is_attr, index, cached_id);
+            }
 
-            // We fetched the ivar list above
-            val = ivar_list[index];
-            RUBY_ASSERT(!UNDEF_P(val));
+            if (index == ATTR_INDEX_NOT_SET) {
+                val = default_value;
+            }
+            else {
+                // We fetched the ivar list above
+                val = ivar_list[index];
+                RUBY_ASSERT(!UNDEF_P(val));
+            }
         }
         else {
             if (is_attr) {