diff --git a/test/ruby/test_yjit.rb b/test/ruby/test_yjit.rb
index 115c1902f0369d..6e8070199d5feb 100644
--- a/test/ruby/test_yjit.rb
+++ b/test/ruby/test_yjit.rb
@@ -548,7 +548,7 @@ def foo &blk
   def test_getblockparamproxy
     # Currently two side exits as OPTIMIZED_METHOD_TYPE_CALL is unimplemented
-    assert_compiles(<<~'RUBY', insns: [:getblockparamproxy], exits: { opt_send_without_block: 2 })
+    assert_compiles(<<~'RUBY', insns: [:getblockparamproxy])
       def foo &blk
         p blk.call
         p blk.call
@@ -607,7 +607,7 @@ def jit_method
   def test_send_kwargs
     # For now, this side-exits when calls include keyword args
-    assert_compiles(<<~'RUBY', result: "2#a:1,b:2/A", exits: {opt_send_without_block: 1})
+    assert_compiles(<<~'RUBY', result: "2#a:1,b:2/A")
      def internal_method(**kw)
        "#{kw.size}##{kw.keys.map { |k| "#{k}:#{kw[k]}" }.join(",")}"
      end
@@ -647,7 +647,7 @@ def jit_method
   def test_send_kwargs_splat
     # For now, this side-exits when calling with a splat
-    assert_compiles(<<~'RUBY', result: "2#a:1,b:2/B", exits: {opt_send_without_block: 1})
+    assert_compiles(<<~'RUBY', result: "2#a:1,b:2/B")
      def internal_method(**kw)
        "#{kw.size}##{kw.keys.map { |k| "#{k}:#{kw[k]}" }.join(",")}"
      end
diff --git a/vm_exec.h b/vm_exec.h
index dbfd4e9f44cb8c..41c4b74ffc6fc1 100644
--- a/vm_exec.h
+++ b/vm_exec.h
@@ -169,10 +169,20 @@ default:        \
 #define THROW_EXCEPTION(exc) return (VALUE)(exc)
 #endif
 
+// Run the interpreter from the JIT
+#define VM_EXEC(ec, val) do { \
+    if (val == Qundef) { \
+        VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH); \
+        val = vm_exec(ec); \
+    } \
+} while (0)
+
+// Run the JIT from the interpreter
 #define JIT_EXEC(ec, val) do { \
     rb_jit_func_t func; \
     if (val == Qundef && (func = jit_compile(ec))) { \
         val = func(ec, ec->cfp); \
+        RESTORE_REGS(); /* fix cfp for tailcall */ \
         if (ec->tag->state) THROW_EXCEPTION(val); \
     } \
 } while (0)
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index d33fdb8fa79acc..c676399c5979be 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -5528,6 +5528,42 @@ vm_sendish(
     return val;
 }
 
+VALUE
+rb_vm_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
+{
+    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
+    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+    VM_EXEC(ec, val);
+    return val;
+}
+
+VALUE
+rb_vm_opt_send_without_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
+{
+    VALUE bh = VM_BLOCK_HANDLER_NONE;
+    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_method);
+    VM_EXEC(ec, val);
+    return val;
+}
+
+VALUE
+rb_vm_invokesuper(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd, ISEQ blockiseq)
+{
+    VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
+    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_super);
+    VM_EXEC(ec, val);
+    return val;
+}
+
+VALUE
+rb_vm_invokeblock(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, CALL_DATA cd)
+{
+    VALUE bh = VM_BLOCK_HANDLER_NONE;
+    VALUE val = vm_sendish(ec, GET_CFP(), cd, bh, mexp_search_invokeblock);
+    VM_EXEC(ec, val);
+    return val;
+}
+
 /* object.c */
 VALUE rb_nil_to_s(VALUE);
 VALUE rb_true_to_s(VALUE);
diff --git a/yjit.c b/yjit.c
index 4db46d59f37412..50dcecae96255c 100644
--- a/yjit.c
+++ b/yjit.c
@@ -1122,6 +1122,20 @@ rb_yjit_assert_holding_vm_lock(void)
     ASSERT_vm_locking();
 }
 
+// The number of stack slots that vm_sendish() pops for send and invokesuper.
+size_t
+rb_yjit_sendish_sp_pops(const struct rb_callinfo *ci)
+{
+    return 1 - sp_inc_of_sendish(ci); // + 1 to ignore return value push
+}
+
+// The number of stack slots that vm_sendish() pops for invokeblock.
+size_t
+rb_yjit_invokeblock_sp_pops(const struct rb_callinfo *ci)
+{
+    return 1 - sp_inc_of_invokeblock(ci); // + 1 to ignore return value push
+}
+
 // Primitives used by yjit.rb
 VALUE rb_yjit_stats_enabled_p(rb_execution_context_t *ec, VALUE self);
 VALUE rb_yjit_trace_exit_locations_enabled_p(rb_execution_context_t *ec, VALUE self);
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index d00816b3d50c8e..5bda8b471bce37 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -325,6 +325,8 @@ fn main() {
         .allowlist_function("rb_yjit_icache_invalidate")
         .allowlist_function("rb_optimized_call")
         .allowlist_function("rb_yjit_assert_holding_vm_lock")
+        .allowlist_function("rb_yjit_sendish_sp_pops")
+        .allowlist_function("rb_yjit_invokeblock_sp_pops")
 
         // from vm_sync.h
         .allowlist_function("rb_vm_barrier")
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index ab436d50229710..e6d65ef4235dcd 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -6428,6 +6428,38 @@ fn gen_struct_aset(
     Some(EndBlock)
 }
 
+// Generate code that calls a method with dynamic dispatch
+fn gen_send_dynamic<F: Fn(&mut Assembler) -> Opnd>(
+    jit: &mut JITState,
+    asm: &mut Assembler,
+    cd: *const rb_call_data,
+    sp_pops: usize,
+    vm_sendish: F,
+) -> Option<CodegenStatus> {
+    // Our frame handling is not compatible with tailcall
+    if unsafe { vm_ci_flag((*cd).ci) } & VM_CALL_TAILCALL != 0 {
+        return None;
+    }
+
+    // Save PC and SP to prepare for dynamic dispatch
+    jit_prepare_routine_call(jit, asm);
+
+    // Pop arguments and a receiver
+    asm.stack_pop(sp_pops);
+
+    // Dispatch a method
+    let ret = vm_sendish(asm);
+
+    // Push the return value
+    let stack_ret = asm.stack_push(Type::Unknown);
+    asm.mov(stack_ret, ret);
+
+    // Fix the interpreter SP deviated by vm_sendish
+    asm.mov(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SP), SP);
+
+    Some(KeepCompiling)
+}
+
 fn gen_send_general(
     jit: &mut JITState,
     asm: &mut Assembler,
@@ -6909,9 +6941,22 @@ fn gen_opt_send_without_block(
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
+    // Generate specialized code if possible
     let cd = jit.get_arg(0).as_ptr();
+    if let Some(status) = gen_send_general(jit, asm, ocb, cd, None) {
+        return Some(status);
+    }
 
-    gen_send_general(jit, asm, ocb, cd, None)
+    // Otherwise, fallback to dynamic dispatch using the interpreter's implementation of send
+    gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+        extern "C" {
+            fn rb_vm_opt_send_without_block(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
+        }
+        asm.ccall(
+            rb_vm_opt_send_without_block as *const u8,
+            vec![EC, CFP, (cd as usize).into()],
+        )
+    })
 }
 
 fn gen_send(
@@ -6919,15 +6964,54 @@ fn gen_send(
     jit: &mut JITState,
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
+    // Generate specialized code if possible
     let cd = jit.get_arg(0).as_ptr();
     let block = jit.get_arg(1).as_optional_ptr();
-    return gen_send_general(jit, asm, ocb, cd, block);
+    if let Some(status) = gen_send_general(jit, asm, ocb, cd, block) {
+        return Some(status);
+    }
+
+    // Otherwise, fallback to dynamic dispatch using the interpreter's implementation of send
+    let blockiseq = jit.get_arg(1).as_iseq();
+    gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+        extern "C" {
+            fn rb_vm_send(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+        }
+        asm.ccall(
+            rb_vm_send as *const u8,
+            vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+        )
+    })
 }
 
 fn gen_invokeblock(
     jit: &mut JITState,
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
+) -> Option<CodegenStatus> {
+    // Generate specialized code if possible
+    let cd = jit.get_arg(0).as_ptr();
+    if let Some(status) = gen_invokeblock_specialized(jit, asm, ocb, cd) {
+        return Some(status);
+    }
+
+    // Otherwise, fallback to dynamic dispatch using the interpreter's implementation of send
+    gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_invokeblock_sp_pops((*cd).ci) }, |asm| {
+        extern "C" {
+            fn rb_vm_invokeblock(ec: EcPtr, cfp: CfpPtr, cd: VALUE) -> VALUE;
+        }
+        asm.ccall(
+            rb_vm_invokeblock as *const u8,
+            vec![EC, CFP, (cd as usize).into()],
+        )
+    })
+}
+
+fn gen_invokeblock_specialized(
+    jit: &mut JITState,
+    asm: &mut Assembler,
+    ocb: &mut OutlinedCb,
+    cd: *const rb_call_data,
 ) -> Option<CodegenStatus> {
     if !jit.at_current_insn() {
         defer_compilation(jit, asm, ocb);
@@ -6935,7 +7019,6 @@ fn gen_invokeblock(
     }
 
     // Get call info
-    let cd = jit.get_arg(0).as_ptr();
     let ci = unsafe { get_call_data_ci(cd) };
     let argc: i32 = unsafe { vm_ci_argc(ci) }.try_into().unwrap();
     let flags = unsafe { vm_ci_flag(ci) };
@@ -7065,7 +7148,31 @@ fn gen_invokesuper(
     asm: &mut Assembler,
     ocb: &mut OutlinedCb,
 ) -> Option<CodegenStatus> {
-    let cd: *const rb_call_data = jit.get_arg(0).as_ptr();
+    // Generate specialized code if possible
+    let cd = jit.get_arg(0).as_ptr();
+    if let Some(status) = gen_invokesuper_specialized(jit, asm, ocb, cd) {
+        return Some(status);
+    }
+
+    // Otherwise, fallback to dynamic dispatch using the interpreter's implementation of send
+    let blockiseq = jit.get_arg(1).as_iseq();
+    gen_send_dynamic(jit, asm, cd, unsafe { rb_yjit_sendish_sp_pops((*cd).ci) }, |asm| {
+        extern "C" {
+            fn rb_vm_invokesuper(ec: EcPtr, cfp: CfpPtr, cd: VALUE, blockiseq: IseqPtr) -> VALUE;
+        }
+        asm.ccall(
+            rb_vm_invokesuper as *const u8,
+            vec![EC, CFP, (cd as usize).into(), VALUE(blockiseq as usize).into()],
+        )
+    })
+}
+
+fn gen_invokesuper_specialized(
+    jit: &mut JITState,
+    asm: &mut Assembler,
+    ocb: &mut OutlinedCb,
+    cd: *const rb_call_data,
+) -> Option<CodegenStatus> {
     let block: Option<IseqPtr> = jit.get_arg(1).as_optional_ptr();
 
     // Defer compilation so we can specialize on class of receiver
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index ed1384571faab1..506120f3f098f7 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -1336,4 +1336,6 @@ extern "C" {
         line: ::std::os::raw::c_int,
     );
     pub fn rb_yjit_assert_holding_vm_lock();
+    pub fn rb_yjit_sendish_sp_pops(ci: *const rb_callinfo) -> usize;
+    pub fn rb_yjit_invokeblock_sp_pops(ci: *const rb_callinfo) -> usize;
 }
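---
A minimal way to exercise the new fallback path, assuming a YJIT-enabled build
of this branch. The sketch below is illustrative only and not part of the
patch; greet, jit_method, and smoke.rb are made-up names. Keyword-argument
sends like the one below previously forced a side exit (hence the
exits: {opt_send_without_block: 1} expectations deleted from test_yjit.rb
above); with gen_send_dynamic() routing them through
rb_vm_opt_send_without_block(), they should now stay in JIT-compiled code.

    # Hypothetical smoke test (not part of this patch).
    # Run with: ruby --yjit --yjit-call-threshold=1 --yjit-stats smoke.rb
    def greet(name:, punct: "!")
      "hello, #{name}#{punct}"
    end

    def jit_method
      greet(name: "world")  # a kwargs send: previously a guaranteed side exit
    end

    jit_method                # compiled once the call threshold is hit
    p jit_method              # runs the JIT-compiled call

    # With --yjit-stats, runtime_stats exposes exit counters; the kwargs send
    # above should no longer contribute to them.
    stats = RubyVM::YJIT.runtime_stats if defined?(RubyVM::YJIT)
    p stats[:side_exit_count] if stats && stats.key?(:side_exit_count)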