From 1dd8202fb02cb3079d015c5d15c0433e455d63ce Mon Sep 17 00:00:00 2001
From: Lu Yahan
Date: Mon, 17 Jan 2022 15:50:49 +0800
Subject: [PATCH 1/5] deps: V8: cherry-pick d8dc66f92169

Original commit message:

    [riscv64][sparkplug] Fix sparkplug verify framesize failed

    Change-Id: I7481749ba3d5c41d7405b0d88a51defbc8bec9d6
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3093009
    Auto-Submit: Yahan Lu
    Reviewed-by: Ji Qiu
    Commit-Queue: Yahan Lu
    Cr-Commit-Position: refs/heads/master@{#76277}

Refs: v8/v8@d8dc66f9
---
 common.gypi                                   |   2 +-
 .../riscv64/baseline-assembler-riscv64-inl.h  | 260 +++++-------------
 .../riscv64/baseline-compiler-riscv64-inl.h   |  64 +----
 .../src/builtins/riscv64/builtins-riscv64.cc  |  68 +++--
 .../codegen/riscv64/macro-assembler-riscv64.h |   2 +-
 5 files changed, 120 insertions(+), 276 deletions(-)

diff --git a/common.gypi b/common.gypi
index b8d61f0d32709f..89fac788d3d293 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.14',
+    'v8_embedder_string': '-node.15',
 
     ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 01f5a5802bb698..f39f5786469c3b 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -109,30 +109,19 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
 }
 
 void BaselineAssembler::CallBuiltin(Builtin builtin) {
-  if (masm()->options().short_builtin_calls) {
-    __ CallBuiltin(builtin);
-  } else {
-    ASM_CODE_COMMENT_STRING(masm_,
-                            __ CommentForOffHeapTrampoline("call", builtin));
-    Register temp = t6;
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Call(temp);
-  }
+  ASM_CODE_COMMENT_STRING(masm_,
+                          __ CommentForOffHeapTrampoline("call", builtin));
+  Register temp = t6;
+  __ LoadEntryFromBuiltin(builtin, temp);
+  __ Call(temp);
 }
 
 void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
-  if (masm()->options().short_builtin_calls) {
-    // Generate pc-relative jump.
-    __ TailCallBuiltin(builtin);
-  } else {
-    ASM_CODE_COMMENT_STRING(
-        masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
-    // t6 be used for function call in RISCV64
-    // For example 'jalr t6' or 'jal t6'
-    Register temp = t6;
-    __ LoadEntryFromBuiltin(builtin, temp);
-    __ Jump(temp);
-  }
+  ASM_CODE_COMMENT_STRING(masm_,
+                          __ CommentForOffHeapTrampoline("tail call", builtin));
+  Register temp = t6;
+  __ LoadEntryFromBuiltin(builtin, temp);
+  __ Jump(temp);
 }
 
 void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
@@ -140,7 +129,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
   ScratchRegisterScope temps(this);
   Register tmp = temps.AcquireScratch();
   __ And(tmp, value, Operand(mask));
-  __ Branch(target, AsMasmCondition(cc), tmp, Operand(mask));
+  __ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
 }
 
 void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
@@ -161,6 +150,11 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                            Label* target, Label::Distance) {
   ScratchRegisterScope temps(this);
   Register type = temps.AcquireScratch();
+  if (FLAG_debug_code) {
+    __ AssertNotSmi(map);
+    __ GetObjectType(map, type, type);
+    __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+  }
   __ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
   __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
 }
@@ -182,44 +176,28 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
 }
 void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                   Label* target, Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register temp = temps.AcquireScratch();
+  // todo: compress pointer
   __ AssertSmi(lhs);
   __ AssertSmi(rhs);
-  if (COMPRESS_POINTERS_BOOL) {
-    __ Sub32(temp, lhs, rhs);
-  } else {
-    __ Sub64(temp, lhs, rhs);
-  }
-  __ Branch(target, AsMasmCondition(cc), temp, Operand(zero_reg));
+  __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
 }
 void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
+  // todo: compress pointer
   ScratchRegisterScope temps(this);
-  Register tmp1 = temps.AcquireScratch();
-  Register tmp2 = temps.AcquireScratch();
-  __ Ld(tmp1, operand);
-  if (COMPRESS_POINTERS_BOOL) {
-    __ Sub32(tmp2, value, tmp1);
-  } else {
-    __ Sub64(tmp2, value, tmp1);
-  }
-  __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+  Register scratch = temps.AcquireScratch();
+  __ Ld(scratch, operand);
+  __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
 }
 void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                      Register value, Label* target,
                                      Label::Distance) {
+  // todo: compress pointer
   ScratchRegisterScope temps(this);
-  Register tmp1 = temps.AcquireScratch();
-  Register tmp2 = temps.AcquireScratch();
-  __ Ld(tmp1, operand);
-  if (COMPRESS_POINTERS_BOOL) {
-    __ Sub32(tmp2, tmp1, value);
-  } else {
-    __ Sub64(tmp2, tmp1, value);
-  }
-  __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+  Register scratch = temps.AcquireScratch();
+  __ Ld(scratch, operand);
+  __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
 }
 void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                    Label* target, Label::Distance) {
@@ -267,137 +245,51 @@ inline Register ToRegister(BaselineAssembler* basm,
   return reg;
 }
 
-template <typename... Args>
-struct CountPushHelper;
-template <>
-struct CountPushHelper<> {
-  static int Count() { return 0; }
-};
-template <typename Arg, typename... Args>
-struct CountPushHelper<Arg, Args...> {
-  static int Count(Arg arg, Args... args) {
-    return 1 + CountPushHelper<Args...>::Count(args...);
-  }
-};
-template <typename... Args>
-struct CountPushHelper<interpreter::RegisterList, Args...> {
-  static int Count(interpreter::RegisterList list, Args... args) {
-    return list.register_count() + CountPushHelper<Args...>::Count(args...);
-  }
-};
-
 template <typename... Args>
 struct PushAllHelper;
-template <typename... Args>
-void PushAll(BaselineAssembler* basm, Args... args) {
-  PushAllHelper<Args...>::Push(basm, args...);
-}
-template <typename... Args>
-void PushAllReverse(BaselineAssembler* basm, Args... args) {
-  PushAllHelper<Args...>::PushReverse(basm, args...);
-}
-
 template <>
 struct PushAllHelper<> {
-  static void Push(BaselineAssembler* basm) {}
-  static void PushReverse(BaselineAssembler* basm) {}
+  static int Push(BaselineAssembler* basm) { return 0; }
+  static int PushReverse(BaselineAssembler* basm) { return 0; }
 };
-
-inline void PushSingle(MacroAssembler* masm, RootIndex source) {
-  masm->PushRoot(source);
-}
-inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
-
-inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
-inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
-  masm->Push(object);
-}
-inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
-  masm->li(kScratchReg, (int64_t)(immediate));
-  PushSingle(masm, kScratchReg);
-}
-
-inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
-  masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
-  PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
-  masm->Ld(kScratchReg, operand);
-  PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
-  return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
-}
-
 template <typename Arg>
 struct PushAllHelper<Arg> {
-  static void Push(BaselineAssembler* basm, Arg arg) {
-    PushSingle(basm->masm(), arg);
+  static int Push(BaselineAssembler* basm, Arg arg) {
+    BaselineAssembler::ScratchRegisterScope scope(basm);
+    basm->masm()->Push(ToRegister(basm, &scope, arg));
+    return 1;
   }
-  static void PushReverse(BaselineAssembler* basm, Arg arg) {
-    // Push the padding register to round up the amount of values pushed.
+  static int PushReverse(BaselineAssembler* basm, Arg arg) {
     return Push(basm, arg);
   }
 };
-template <typename Arg1, typename Arg2, typename... Args>
-struct PushAllHelper<Arg1, Arg2, Args...> {
-  static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
-                   Args... args) {
-    {
-      BaselineAssembler::ScratchRegisterScope scope(basm);
-      basm->masm()->Push(ToRegister(basm, &scope, arg1),
-                         ToRegister(basm, &scope, arg2));
-    }
-    PushAll(basm, args...);
-  }
-  static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
-                          Args... args) {
-    PushAllReverse(basm, args...);
-    {
-      BaselineAssembler::ScratchRegisterScope scope(basm);
-      basm->masm()->Push(ToRegister(basm, &scope, arg2),
-                         ToRegister(basm, &scope, arg1));
-    }
-  }
-};
-// Currently RegisterLists are always be the last argument, so we don't
-// specialize for the case where they're not. We do still specialise for the
-// aligned and unaligned cases.
-template <typename Arg>
-struct PushAllHelper<Arg, interpreter::RegisterList> {
-  static void Push(BaselineAssembler* basm, Arg arg,
-                   interpreter::RegisterList list) {
-    DCHECK_EQ(list.register_count() % 2, 1);
-    PushAll(basm, arg, list[0], list.PopLeft());
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+    PushAllHelper<Arg>::Push(basm, arg);
+    return 1 + PushAllHelper<Args...>::Push(basm, args...);
   }
-  static void PushReverse(BaselineAssembler* basm, Arg arg,
-                          interpreter::RegisterList list) {
-    if (list.register_count() == 0) {
-      PushAllReverse(basm, arg);
-    } else {
-      PushAllReverse(basm, arg, list[0], list.PopLeft());
-    }
+  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+    PushAllHelper<Arg>::Push(basm, arg);
+    return nargs + 1;
   }
 };
 template <>
 struct PushAllHelper<interpreter::RegisterList> {
-  static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
-    DCHECK_EQ(list.register_count() % 2, 0);
-    for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
-      PushAll(basm, list[reg_index], list[reg_index + 1]);
+  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
     }
+    return list.register_count();
   }
-  static void PushReverse(BaselineAssembler* basm,
-                          interpreter::RegisterList list) {
-    int reg_index = list.register_count() - 1;
-    if (reg_index % 2 == 0) {
-      // Push the padding register to round up the amount of values pushed.
-      PushAllReverse(basm, list[reg_index]);
-      reg_index--;
-    }
-    for (; reg_index >= 1; reg_index -= 2) {
-      PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+  static int PushReverse(BaselineAssembler* basm,
+                         interpreter::RegisterList list) {
+    for (int reg_index = list.register_count() - 1; reg_index >= 0;
+         --reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
     }
+    return list.register_count();
   }
 };
 
@@ -414,10 +306,9 @@ struct PopAllHelper<Register> {
   }
 };
 template <typename... T>
-struct PopAllHelper<Register, Register, T...> {
-  static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
-                  T... tail) {
-    basm->masm()->Pop(reg1, reg2);
+struct PopAllHelper<Register, T...> {
+  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+    PopAllHelper<Register>::Pop(basm, reg);
     PopAllHelper<T...>::Pop(basm, tail...);
   }
 };
@@ -426,20 +317,12 @@ struct PopAllHelper<Register, Register, T...> {
 
 template <typename... T>
 int BaselineAssembler::Push(T... vals) {
-  // We have to count the pushes first, to decide whether to add padding before
-  // the first push.
-  int push_count = detail::CountPushHelper<T...>::Count(vals...);
-  if (push_count % 2 == 0) {
-    detail::PushAll(this, vals...);
-  } else {
-    detail::PushAll(this, vals...);
-  }
-  return push_count;
+  return detail::PushAllHelper<T...>::Push(this, vals...);
 }
 
 template <typename... T>
 void BaselineAssembler::PushReverse(T... vals) {
-  detail::PushAllReverse(this, vals...);
+  detail::PushAllHelper<T...>::PushReverse(this, vals...);
 }
 
 template <typename... T>
@@ -461,7 +344,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
 }
 void BaselineAssembler::LoadByteField(Register output, Register source,
                                       int offset) {
-  __ Ld(output, FieldMemOperand(source, offset));
+  __ Lb(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                                Smi value) {
@@ -495,11 +378,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
                       JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
-  __ Ld(interrupt_budget,
+  __ Lw(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   // Remember to set flags as part of the add!
-  __ Add64(interrupt_budget, interrupt_budget, weight);
-  __ Sd(interrupt_budget,
+  __ Add32(interrupt_budget, interrupt_budget, weight);
+  __ Sw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   if (skip_interrupt_label) {
     DCHECK_LT(weight, 0);
@@ -517,11 +400,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
                       JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
-  __ Ld(interrupt_budget,
+  __ Lw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   // Remember to set flags as part of the add!
-  __ Add64(interrupt_budget, interrupt_budget, weight);
-  __ Sd(interrupt_budget,
+  __ Add32(interrupt_budget, interrupt_budget, weight);
+  __ Sw(interrupt_budget,
        FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
   if (skip_interrupt_label)
     __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight));
@@ -556,20 +439,19 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
   int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
   int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
   __ auipc(temp, Hi20);  // Read PC + Hi20 into t6
-  __ lui(temp, Lo12);  // jump PC + Hi20 + Lo12
+  __ addi(temp, temp, Lo12);  // jump PC + Hi20 + Lo12
 
-  int entry_size_log2 = 2;
-  Register temp2 = scope.AcquireScratch();
-  __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
+  int entry_size_log2 = 3;
+  __ CalcScaledAddress(temp, temp, reg, entry_size_log2);
   __ Jump(temp);
   {
     TurboAssembler::BlockTrampolinePoolScope(masm());
-    __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+    __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
     __ bind(&table);
     for (int i = 0; i < num_labels; ++i) {
-      __ Branch(labels[i]);
+      __ BranchLong(labels[i]);
     }
-    DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
+    DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table));
     __ bind(&fallthrough);
   }
 }
@@ -598,7 +480,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 
     __ masm()->Push(kJSFunctionRegister);
     __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
-    __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+    __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
     __ masm()->SmiUntag(params_size);
 
   __ Bind(&skip_interrupt_label);
diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
index fc73105b8e9ad6..1fbdaa0761e1ea 100644
--- a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
   const int kLoopUnrollSize = 8;
   const int new_target_index = new_target_or_generator_register.index();
   const bool has_new_target = new_target_index != kMaxInt;
-  // BaselineOutOfLinePrologue already pushed one undefined.
-  register_count -= 1;
   if (has_new_target) {
-    if (new_target_index == 0) {
-      // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
-      // pushed.
-      __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
-    } else {
-      DCHECK_LE(new_target_index, register_count);
-      int index = 1;
-      for (; index + 2 <= new_target_index; index += 2) {
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kInterpreterAccumulatorRegister);
-      }
-      if (index == new_target_index) {
-        __ masm()->Push(kJavaScriptCallNewTargetRegister,
-                        kInterpreterAccumulatorRegister);
-      } else {
-        DCHECK_EQ(index, new_target_index - 1);
-        __ masm()->Push(kInterpreterAccumulatorRegister,
-                        kJavaScriptCallNewTargetRegister);
-      }
-      // We pushed "index" registers, minus the one the prologue pushed, plus
-      // the two registers that included new_target.
-      register_count -= (index - 1 + 2);
+    DCHECK_LE(new_target_index, register_count);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+    for (int i = 0; i < new_target_index; i++) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
+    // Push new_target_or_generator.
+    __ Push(kJavaScriptCallNewTargetRegister);
+    register_count -= new_target_index + 1;
   }
   if (register_count < 2 * kLoopUnrollSize) {
     // If the frame is small enough, just unroll the frame fill completely.
-    for (int i = 0; i < register_count; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
   } else {
-    BaselineAssembler::ScratchRegisterScope temps(&basm_);
-    Register scratch = temps.AcquireScratch();
-
-    // Extract the first few registers to round to the unroll size.
-    int first_registers = register_count % kLoopUnrollSize;
-    for (int i = 0; i < first_registers; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
-    }
-    __ Move(scratch, register_count / kLoopUnrollSize);
-    // We enter the loop unconditionally, so make sure we need to loop at least
-    // once.
-    DCHECK_GT(register_count / kLoopUnrollSize, 0);
-    Label loop;
-    __ Bind(&loop);
-    for (int i = 0; i < kLoopUnrollSize; i += 2) {
-      __ masm()->Push(kInterpreterAccumulatorRegister,
-                      kInterpreterAccumulatorRegister);
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+    for (int i = 0; i < register_count; ++i) {
+      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
     }
-    __ masm()->Branch(&loop, gt, scratch, Operand(1));
   }
 }
 
 void BaselineCompiler::VerifyFrameSize() {
   ASM_CODE_COMMENT(&masm_);
   __ masm()->Add64(kScratchReg, sp,
-                   RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
-                               bytecode_->frame_size(),
-                           2 * kSystemPointerSize));
+                   Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+                           bytecode_->frame_size()));
   __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
                     Operand(fp));
 }
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index f79e392f4800fc..38136ed53a154a 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -1160,9 +1160,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   // store the bytecode offset.
   if (FLAG_debug_code) {
     UseScratchRegisterScope temps(masm);
-    Register type = temps.Acquire();
-    __ GetObjectType(feedback_vector, type, type);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+    Register invocation_count = temps.Acquire();
+    __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+    __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
              Operand(FEEDBACK_VECTOR_TYPE));
   }
   // Our stack is currently aligned. We have have to push something along with
@@ -1171,8 +1171,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
   // `undefined` in the accumulator register, to skip the load in the baseline
   // code.
-  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-  __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+  __ Push(feedback_vector);
 }
 Label call_stack_guard;
 Register frame_size = descriptor.GetRegisterParameter(
@@ -1203,7 +1202,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   {
     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
     // Drop the frame created by the baseline call.
-    __ Pop(fp, ra);
+    __ Pop(ra, fp);
     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
                                                  feedback_vector);
     __ Trap();
@@ -1212,14 +1211,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   __ bind(&call_stack_guard);
   {
     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
-    Register new_target = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
-
     FrameScope frame_scope(masm, StackFrame::INTERNAL);
     // Save incoming new target or generator
-    __ Push(zero_reg, new_target);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ Pop(new_target, zero_reg);
+    __ Push(kJavaScriptCallNewTargetRegister);
+    __ SmiTag(frame_size);
+    __ Push(frame_size);
+    __ CallRuntime(Runtime::kStackGuardWithGap);
+    __ Pop(kJavaScriptCallNewTargetRegister);
   }
   __ Ret();
   temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
@@ -1466,31 +1464,25 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ Ld(feedback_vector,
-          FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ Ld(feedback_vector,
-          FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedPointerField(
+        feedback_vector,
+        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+    __ LoadTaggedPointerField(
+        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-    __ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ LoadTaggedPointerField(
+        scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+    __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
     __ Branch(&install_baseline_code, ne, scratch,
              Operand(FEEDBACK_VECTOR_TYPE));
 
-    // Read off the optimization state in the feedback vector.
-    // TODO(v8:11429): Is this worth doing here? Baseline code will check it
-    // anyway...
-    __ Ld(optimization_state,
-          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
-    // Check if there is optimized code or a optimization marker that needes to
-    // be processed.
-    __ And(
-        scratch, optimization_state,
-        Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
-    __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+    // Check for an optimization marker.
+    LoadOptimizationStateAndJumpIfNeedsProcessing(
+        masm, optimization_state, feedback_vector,
+        &has_optimized_code_or_marker);
 
     // Load the baseline code into the closure.
     __ LoadTaggedPointerField(
@@ -2713,6 +2705,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
           RelocInfo::CODE_TARGET);
 }
 
+#if V8_ENABLE_WEBASSEMBLY
 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
   // The function index was put in t0 by the jump table trampoline.
   // Convert to Smi for the runtime call
@@ -2786,6 +2779,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
   }
   __ Ret();
 }
+#endif  // V8_ENABLE_WEBASSEMBLY
 
 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -3640,7 +3634,6 @@ namespace {
 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                          bool next_bytecode,
                                          bool is_osr = false) {
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
   Label start;
   __ bind(&start);
 
@@ -3667,7 +3660,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
 
   // Start with bytecode as there is no baseline code.
-  __ Pop(zero_reg, kInterpreterAccumulatorRegister);
   Builtin builtin_id = next_bytecode
                            ? Builtin::kInterpreterEnterAtNextBytecode
                            : Builtin::kInterpreterEnterAtBytecode;
@@ -3701,7 +3693,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   UseScratchRegisterScope temps(masm);
   Register type = temps.Acquire();
   __ GetObjectType(feedback_vector, type, type);
-  __ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
+  __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
   // Save BytecodeOffset from the stack frame.
   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
@@ -3711,7 +3703,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   feedback_vector = no_reg;
 
   // Compute baseline pc for bytecode offset.
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
   ExternalReference get_baseline_pc_extref;
   if (next_bytecode || is_osr) {
     get_baseline_pc_extref =
@@ -3744,6 +3735,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // Get bytecode array from the stack frame.
   __ Ld(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ Push(kInterpreterAccumulatorRegister);
   {
     Register arg_reg_1 = a0;
     Register arg_reg_2 = a1;
@@ -3755,13 +3747,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
     __ CallCFunction(get_baseline_pc, 3, 0);
   }
   __ Add64(code_obj, code_obj, kReturnRegister0);
-  __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+  __ Pop(kInterpreterAccumulatorRegister);
 
   if (is_osr) {
     // Reset the OSR loop nesting depth to disarm back edges.
     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
     // Sparkplug here.
-    __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+    __ Ld(kInterpreterBytecodeArrayRegister,
+          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+    __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                                     BytecodeArray::kOsrLoopNestingLevelOffset));
     Generate_OSREntry(masm, code_obj,
                       Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -3786,8 +3780,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   __ bind(&install_baseline_code);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(kInterpreterAccumulatorRegister);
     __ Push(closure);
     __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+    __ Pop(kInterpreterAccumulatorRegister);
   }
   // Retry from the start after installing baseline code.
   __ Branch(&start);
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 04285916bca162..75d99a34059b1b 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
   void Branch(Label* target);
   void Branch(int32_t target);
+  void BranchLong(Label* L);
   void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
               Label::Distance near_jump = Label::kFar);
   void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
@@ -945,7 +946,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                                Register rs, const Operand& rt);
   bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt);
-  void BranchLong(Label* L);
   void BranchAndLinkLong(Label* L);
 
   template

From 12920f97e66be5fcfd4ebb1e4ffea701fa8bca88 Mon Sep 17 00:00:00 2001
From: Lu Yahan
Date: Mon, 17 Jan 2022 15:51:18 +0800
Subject: [PATCH 2/5] deps: V8: cherry-pick 3cab84c24723

Original commit message:

    [riscv64] Use s1 to save code_obj

    The caller saved a4 may be clobbered by the callee function.
    So we substitute it with the callee saved s1 to save code_obj.

    Change-Id: Iebe707cbaa62d47fdee0aa117e32e88f67dac743
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3096886
    Reviewed-by: Ji Qiu
    Commit-Queue: Ji Qiu
    Commit-Queue: Yahan Lu
    Auto-Submit: Yahan Lu
    Cr-Commit-Position: refs/heads/master@{#76294}

Refs: v8/v8@3cab84c2472
---
 common.gypi                                      | 2 +-
 deps/v8/src/builtins/riscv64/builtins-riscv64.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/common.gypi b/common.gypi
index 89fac788d3d293..4e938b15d6cbfa 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
    # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.15',
+    'v8_embedder_string': '-node.16',
 
    ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index 38136ed53a154a..764ef97952cf13 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -3642,7 +3642,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 
   __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
   // Get the Code object from the shared function info.
-  Register code_obj = a4;
+  Register code_obj = s1;
   __ LoadTaggedPointerField(
       code_obj,
       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));

From c203eeb2cbc01762885e36008e1841c39b391d5b Mon Sep 17 00:00:00 2001
From: Lu Yahan
Date: Mon, 17 Jan 2022 15:51:37 +0800
Subject: [PATCH 3/5] deps: V8: cherry-pick 471f862954f3

Original commit message:

    [riscv64] Link should greater and equal zero

    Change-Id: Ieeb5888efc068707766aef6ba6fc842c5deaaf9c
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3146673
    Commit-Queue: Yahan Lu
    Auto-Submit: Yahan Lu
    Reviewed-by: Ji Qiu
    Cr-Commit-Position: refs/heads/main@{#76784}

Refs: v8/v8@471f862954f3
---
 common.gypi                                      | 2 +-
 deps/v8/src/codegen/riscv64/assembler-riscv64.cc | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/common.gypi b/common.gypi
index 4e938b15d6cbfa..88033804f1f291 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
    # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.16',
+    'v8_embedder_string': '-node.17',
 
    ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 0c322542a9440b..3534559fd5ff97 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -699,7 +699,7 @@ void Assembler::next(Label* L, bool is_internal) {
   if (link == kEndOfChain) {
     L->Unuse();
   } else {
-    DCHECK_GT(link, 0);
+    DCHECK_GE(link, 0);
     DEBUG_PRINTF("next: %p to %p (%d)\n", L,
                  reinterpret_cast<Instr*>(buffer_start_ + link), link);
     L->link_to(link);

From 81e50d8a46f93b7d459d049b554653ff8d6a3758 Mon Sep 17 00:00:00 2001
From: Lu Yahan
Date: Mon, 17 Jan 2022 15:54:35 +0800
Subject: [PATCH 4/5] deps: V8: cherry-pick e74d6918fb9f

Original commit message:

    [sparkplug] Simplify arch-guards to ENABLE_SPARKPLUG

    There is still a place to simplify.

    Bug: v8:11420, v8:11421
    Change-Id: I774139c52d911323f162350532a493e70f518643
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3096885
    Auto-Submit: Yahan Lu
    Commit-Queue: Leszek Swirski
    Reviewed-by: Leszek Swirski
    Cr-Commit-Position: refs/heads/master@{#76296}

Refs: v8/v8@e74d6918fb9
---
 common.gypi                                     | 2 +-
 deps/v8/src/baseline/baseline-batch-compiler.cc | 5 ++---
 deps/v8/src/builtins/builtins-internal-gen.cc   | 4 +---
 3 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/common.gypi b/common.gypi
index 88033804f1f291..22e35703521a3e 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
    # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.17',
+    'v8_embedder_string': '-node.18',
 
    ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index 6a25df72648914..c9055db7329f53 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -6,9 +6,8 @@
 
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
-    V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
 
 #include "src/baseline/baseline-compiler.h"
 #include "src/codegen/compiler.h"
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 49ad4b4e7c6898..6f9bfa342622e7 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -1053,9 +1053,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
 
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
-    V8_TARGET_ARCH_MIPS
+#if ENABLE_SPARKPLUG
 void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
   EmitReturnBaseline(masm);
 }

From f18bb63439fdff1c4a517276277ec16e7026ef99 Mon Sep 17 00:00:00 2001
From: Lu Yahan
Date: Mon, 17 Jan 2022 15:57:41 +0800
Subject: [PATCH 5/5] deps: V8: backport 77599ffe0a74

Original commit message:

    [riscv64] Add block before LoadAddress

    fix node.js DCHECK failed issue:
    https://github.com/riscv-collab/v8/issues/514

    Change-Id: I07f40e6aca05be3eb7304a43235185fd40ebc1f2
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3260979
    Reviewed-by: ji qiu
    Commit-Queue: ji qiu
    Auto-Submit: Yahan Lu
    Cr-Commit-Position: refs/heads/main@{#77750}

Refs: v8/v8@77599ffe0a74
---
 common.gypi                                                   | 2 +-
 deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h | 1 +
 deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc        | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/common.gypi b/common.gypi
index 22e35703521a3e..36f789821ed1ff 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
    # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.18',
+    'v8_embedder_string': '-node.19',
 
    ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index f39f5786469c3b..83afd560b002d0 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -438,6 +438,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
   DCHECK(is_int32(imm64));
   int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
   int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
+  __ BlockTrampolinePoolFor(2);
   __ auipc(temp, Hi20);  // Read PC + Hi20 into t6
   __ addi(temp, temp, Lo12);  // jump PC + Hi20 + Lo12
 
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 3baa71d1a2e768..4d231adfb4823e 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -3485,6 +3485,7 @@ void TurboAssembler::LoadAddress(Register dst, Label* target,
   if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) {
     int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
     int32_t Lo12 = (int32_t)offset << 20 >> 20;
+    BlockTrampolinePoolScope block_trampoline_pool(this);
     auipc(dst, Hi20);
     addi(dst, dst, Lo12);
   } else {