diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc index c37231a707ff..6c0379f7dbf1 100644 --- a/src/compiler/arm64/code-generator-arm64.cc +++ b/src/compiler/arm64/code-generator-arm64.cc @@ -395,67 +395,52 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, } \ } while (0) -#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ +#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, reg) \ do { \ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ asm_instr(i.OutputRegister32(), i.TempRegister(0)); \ + __ asm_instr(i.Output##reg(), i.TempRegister(0)); \ } while (0) -#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ +#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, reg) \ do { \ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ asm_instr(i.InputRegister32(2), i.TempRegister(0)); \ + __ asm_instr(i.Input##reg(2), i.TempRegister(0)); \ } while (0) -#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \ - do { \ - Label exchange; \ - __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ Bind(&exchange); \ - __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \ - __ store_instr(i.TempRegister32(1), i.InputRegister32(2), \ - i.TempRegister(0)); \ - __ Cbnz(i.TempRegister32(1), &exchange); \ +#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr, reg) \ + do { \ + Label exchange; \ + __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ + __ Bind(&exchange); \ + __ load_instr(i.Output##reg(), i.TempRegister(0)); \ + __ store_instr(i.TempRegister32(1), i.Input##reg(2), i.TempRegister(0)); \ + __ Cbnz(i.TempRegister32(1), &exchange); \ } while (0) -#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext) \ +#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, ext, \ + reg) \ do { \ Label compareExchange; \ Label exit; \ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ __ Bind(&compareExchange); \ - __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \ - __ Cmp(i.OutputRegister32(), Operand(i.InputRegister32(2), ext)); \ + __ load_instr(i.Output##reg(), i.TempRegister(0)); \ + __ Cmp(i.Output##reg(), Operand(i.Input##reg(2), ext)); \ __ B(ne, &exit); \ - __ store_instr(i.TempRegister32(1), i.InputRegister32(3), \ - i.TempRegister(0)); \ + __ store_instr(i.TempRegister32(1), i.Input##reg(3), i.TempRegister(0)); \ __ Cbnz(i.TempRegister32(1), &compareExchange); \ __ Bind(&exit); \ } while (0) -#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \ - do { \ - Label binop; \ - __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ Bind(&binop); \ - __ load_instr(i.OutputRegister32(), i.TempRegister(0)); \ - __ bin_instr(i.TempRegister32(1), i.OutputRegister32(), \ - Operand(i.InputRegister32(2))); \ - __ store_instr(i.TempRegister32(2), i.TempRegister32(1), \ - i.TempRegister(0)); \ - __ Cbnz(i.TempRegister32(2), &binop); \ - } while (0) - -#define ASSEMBLE_ATOMIC64_BINOP(load_instr, store_instr, bin_instr) \ +#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr, reg) \ do { \ Label binop; \ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ __ Bind(&binop); \ - __ load_instr(i.OutputRegister(), i.TempRegister(0)); \ - __ bin_instr(i.TempRegister(1), i.OutputRegister(), \ - Operand(i.InputRegister(2))); \ - __ store_instr(i.TempRegister(2), i.TempRegister(1), i.TempRegister(0)); \ - __ Cbnz(i.TempRegister(2), &binop); \ 
+ __ load_instr(i.Output##reg(), i.TempRegister(0)); \ + __ bin_instr(i.Temp##reg(1), i.Output##reg(), Operand(i.Input##reg(2))); \ + __ store_instr(i.TempRegister32(2), i.Temp##reg(1), i.TempRegister(0)); \ + __ Cbnz(i.TempRegister32(2), &binop); \ } while (0) #define ASSEMBLE_IEEE754_BINOP(name) \ @@ -1606,85 +1591,116 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Operand(kSpeculationPoisonRegister)); break; case kWord32AtomicLoadInt8: - ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb); + ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32); __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); break; case kWord32AtomicLoadUint8: - ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb); + case kArm64Word64AtomicLoadUint8: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32); break; case kWord32AtomicLoadInt16: - ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh); + ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32); __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); break; case kWord32AtomicLoadUint16: - ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh); + case kArm64Word64AtomicLoadUint16: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32); break; case kWord32AtomicLoadWord32: - ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar); + case kArm64Word64AtomicLoadUint32: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register32); + break; + case kArm64Word64AtomicLoadUint64: + ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register); break; case kWord32AtomicStoreWord8: - ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb); + case kArm64Word64AtomicStoreWord8: + ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb, Register32); break; case kWord32AtomicStoreWord16: - ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh); + case kArm64Word64AtomicStoreWord16: + ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh, Register32); break; case kWord32AtomicStoreWord32: - ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr); + case kArm64Word64AtomicStoreWord32: + ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register32); + break; + case kArm64Word64AtomicStoreWord64: + ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register); break; case kWord32AtomicExchangeInt8: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb); + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32); __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); break; case kWord32AtomicExchangeUint8: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb); + case kArm64Word64AtomicExchangeUint8: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32); break; case kWord32AtomicExchangeInt16: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh); + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32); __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); break; case kWord32AtomicExchangeUint16: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh); + case kArm64Word64AtomicExchangeUint16: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32); break; case kWord32AtomicExchangeWord32: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr); + case kArm64Word64AtomicExchangeUint32: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32); + break; + case kArm64Word64AtomicExchangeUint64: + ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register); break; case kWord32AtomicCompareExchangeInt8: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB); + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB, + Register32); __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); break; case kWord32AtomicCompareExchangeUint8: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB); + case kArm64Word64AtomicCompareExchangeUint8: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB, + 
Register32); break; case kWord32AtomicCompareExchangeInt16: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH); + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH, + Register32); __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); break; case kWord32AtomicCompareExchangeUint16: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH); + case kArm64Word64AtomicCompareExchangeUint16: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH, + Register32); break; case kWord32AtomicCompareExchangeWord32: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW); - break; -#define ATOMIC_BINOP_CASE(op, inst) \ - case kWord32Atomic##op##Int8: \ - ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \ - __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \ - break; \ - case kWord32Atomic##op##Uint8: \ - case kArm64Word64Atomic##op##Uint8: \ - ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \ - break; \ - case kWord32Atomic##op##Int16: \ - ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \ - __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \ - break; \ - case kWord32Atomic##op##Uint16: \ - case kArm64Word64Atomic##op##Uint16: \ - ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \ - break; \ - case kWord32Atomic##op##Word32: \ - case kArm64Word64Atomic##op##Uint32: \ - ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \ + case kArm64Word64AtomicCompareExchangeUint32: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32); + break; + case kArm64Word64AtomicCompareExchangeUint64: + ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register); + break; +#define ATOMIC_BINOP_CASE(op, inst) \ + case kWord32Atomic##op##Int8: \ + ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \ + __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \ + break; \ + case kWord32Atomic##op##Uint8: \ + case kArm64Word64Atomic##op##Uint8: \ + ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \ + break; \ + case kWord32Atomic##op##Int16: \ + ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \ + __ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \ + break; \ + case kWord32Atomic##op##Uint16: \ + case kArm64Word64Atomic##op##Uint16: \ + ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \ + break; \ + case kWord32Atomic##op##Word32: \ + case kArm64Word64Atomic##op##Uint32: \ + ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \ + break; \ + case kArm64Word64Atomic##op##Uint64: \ + ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register); \ break; ATOMIC_BINOP_CASE(Add, Add) ATOMIC_BINOP_CASE(Sub, Sub) @@ -1692,25 +1708,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ATOMIC_BINOP_CASE(Or, Orr) ATOMIC_BINOP_CASE(Xor, Eor) #undef ATOMIC_BINOP_CASE -#define ATOMIC64_BINOP_CASE(op, inst) \ - case kArm64Word64Atomic##op##Uint64: \ - ASSEMBLE_ATOMIC64_BINOP(ldaxr, stlxr, inst); \ - break; - ATOMIC64_BINOP_CASE(Add, Add) - ATOMIC64_BINOP_CASE(Sub, Sub) - ATOMIC64_BINOP_CASE(And, And) - ATOMIC64_BINOP_CASE(Or, Orr) - ATOMIC64_BINOP_CASE(Xor, Eor) -#undef ATOMIC64_BINOP_CASE #undef ASSEMBLE_SHIFT #undef ASSEMBLE_ATOMIC_LOAD_INTEGER #undef ASSEMBLE_ATOMIC_STORE_INTEGER #undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER +#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER #undef ASSEMBLE_ATOMIC_BINOP -#undef ASSEMBLE_ATOMIC64_BINOP #undef ASSEMBLE_IEEE754_BINOP #undef ASSEMBLE_IEEE754_UNOP -#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER #define SIMD_UNOP_CASE(Op, Instr, FORMAT) \ case Op: \ diff --git a/src/compiler/arm64/instruction-codes-arm64.h 
b/src/compiler/arm64/instruction-codes-arm64.h index 5eb09dfe05ae..c5f4b19c1636 100644 --- a/src/compiler/arm64/instruction-codes-arm64.h +++ b/src/compiler/arm64/instruction-codes-arm64.h @@ -11,316 +11,332 @@ namespace compiler { // ARM64-specific opcodes that specify which assembly sequence to emit. // Most opcodes specify a single instruction. -#define TARGET_ARCH_OPCODE_LIST(V) \ - V(Arm64Add) \ - V(Arm64Add32) \ - V(Arm64And) \ - V(Arm64And32) \ - V(Arm64Bic) \ - V(Arm64Bic32) \ - V(Arm64Clz) \ - V(Arm64Clz32) \ - V(Arm64Cmp) \ - V(Arm64Cmp32) \ - V(Arm64Cmn) \ - V(Arm64Cmn32) \ - V(Arm64Tst) \ - V(Arm64Tst32) \ - V(Arm64Or) \ - V(Arm64Or32) \ - V(Arm64Orn) \ - V(Arm64Orn32) \ - V(Arm64Eor) \ - V(Arm64Eor32) \ - V(Arm64Eon) \ - V(Arm64Eon32) \ - V(Arm64Sub) \ - V(Arm64Sub32) \ - V(Arm64Mul) \ - V(Arm64Mul32) \ - V(Arm64Smull) \ - V(Arm64Umull) \ - V(Arm64Madd) \ - V(Arm64Madd32) \ - V(Arm64Msub) \ - V(Arm64Msub32) \ - V(Arm64Mneg) \ - V(Arm64Mneg32) \ - V(Arm64Idiv) \ - V(Arm64Idiv32) \ - V(Arm64Udiv) \ - V(Arm64Udiv32) \ - V(Arm64Imod) \ - V(Arm64Imod32) \ - V(Arm64Umod) \ - V(Arm64Umod32) \ - V(Arm64Not) \ - V(Arm64Not32) \ - V(Arm64Lsl) \ - V(Arm64Lsl32) \ - V(Arm64Lsr) \ - V(Arm64Lsr32) \ - V(Arm64Asr) \ - V(Arm64Asr32) \ - V(Arm64Ror) \ - V(Arm64Ror32) \ - V(Arm64Mov32) \ - V(Arm64Sxtb32) \ - V(Arm64Sxth32) \ - V(Arm64Sxtb) \ - V(Arm64Sxth) \ - V(Arm64Sxtw) \ - V(Arm64Sbfx32) \ - V(Arm64Ubfx) \ - V(Arm64Ubfx32) \ - V(Arm64Ubfiz32) \ - V(Arm64Bfi) \ - V(Arm64Rbit) \ - V(Arm64Rbit32) \ - V(Arm64TestAndBranch32) \ - V(Arm64TestAndBranch) \ - V(Arm64CompareAndBranch32) \ - V(Arm64CompareAndBranch) \ - V(Arm64Claim) \ - V(Arm64Poke) \ - V(Arm64PokePair) \ - V(Arm64Peek) \ - V(Arm64Float32Cmp) \ - V(Arm64Float32Add) \ - V(Arm64Float32Sub) \ - V(Arm64Float32Mul) \ - V(Arm64Float32Div) \ - V(Arm64Float32Abs) \ - V(Arm64Float32Neg) \ - V(Arm64Float32Sqrt) \ - V(Arm64Float32RoundDown) \ - V(Arm64Float32Max) \ - V(Arm64Float32Min) \ - V(Arm64Float64Cmp) \ - V(Arm64Float64Add) \ - V(Arm64Float64Sub) \ - V(Arm64Float64Mul) \ - V(Arm64Float64Div) \ - V(Arm64Float64Mod) \ - V(Arm64Float64Max) \ - V(Arm64Float64Min) \ - V(Arm64Float64Abs) \ - V(Arm64Float64Neg) \ - V(Arm64Float64Sqrt) \ - V(Arm64Float64RoundDown) \ - V(Arm64Float32RoundUp) \ - V(Arm64Float64RoundUp) \ - V(Arm64Float64RoundTiesAway) \ - V(Arm64Float32RoundTruncate) \ - V(Arm64Float64RoundTruncate) \ - V(Arm64Float32RoundTiesEven) \ - V(Arm64Float64RoundTiesEven) \ - V(Arm64Float64SilenceNaN) \ - V(Arm64Float32ToFloat64) \ - V(Arm64Float64ToFloat32) \ - V(Arm64Float32ToInt32) \ - V(Arm64Float64ToInt32) \ - V(Arm64Float32ToUint32) \ - V(Arm64Float64ToUint32) \ - V(Arm64Float32ToInt64) \ - V(Arm64Float64ToInt64) \ - V(Arm64Float32ToUint64) \ - V(Arm64Float64ToUint64) \ - V(Arm64Int32ToFloat32) \ - V(Arm64Int32ToFloat64) \ - V(Arm64Int64ToFloat32) \ - V(Arm64Int64ToFloat64) \ - V(Arm64Uint32ToFloat32) \ - V(Arm64Uint32ToFloat64) \ - V(Arm64Uint64ToFloat32) \ - V(Arm64Uint64ToFloat64) \ - V(Arm64Float64ExtractLowWord32) \ - V(Arm64Float64ExtractHighWord32) \ - V(Arm64Float64InsertLowWord32) \ - V(Arm64Float64InsertHighWord32) \ - V(Arm64Float64MoveU64) \ - V(Arm64U64MoveFloat64) \ - V(Arm64LdrS) \ - V(Arm64StrS) \ - V(Arm64LdrD) \ - V(Arm64StrD) \ - V(Arm64LdrQ) \ - V(Arm64StrQ) \ - V(Arm64Ldrb) \ - V(Arm64Ldrsb) \ - V(Arm64Strb) \ - V(Arm64Ldrh) \ - V(Arm64Ldrsh) \ - V(Arm64Strh) \ - V(Arm64Ldrsw) \ - V(Arm64LdrW) \ - V(Arm64StrW) \ - V(Arm64Ldr) \ - V(Arm64Str) \ - V(Arm64DsbIsb) \ - V(Arm64F32x4Splat) \ - V(Arm64F32x4ExtractLane) 
\ - V(Arm64F32x4ReplaceLane) \ - V(Arm64F32x4SConvertI32x4) \ - V(Arm64F32x4UConvertI32x4) \ - V(Arm64F32x4Abs) \ - V(Arm64F32x4Neg) \ - V(Arm64F32x4RecipApprox) \ - V(Arm64F32x4RecipSqrtApprox) \ - V(Arm64F32x4Add) \ - V(Arm64F32x4AddHoriz) \ - V(Arm64F32x4Sub) \ - V(Arm64F32x4Mul) \ - V(Arm64F32x4Min) \ - V(Arm64F32x4Max) \ - V(Arm64F32x4Eq) \ - V(Arm64F32x4Ne) \ - V(Arm64F32x4Lt) \ - V(Arm64F32x4Le) \ - V(Arm64I32x4Splat) \ - V(Arm64I32x4ExtractLane) \ - V(Arm64I32x4ReplaceLane) \ - V(Arm64I32x4SConvertF32x4) \ - V(Arm64I32x4SConvertI16x8Low) \ - V(Arm64I32x4SConvertI16x8High) \ - V(Arm64I32x4Neg) \ - V(Arm64I32x4Shl) \ - V(Arm64I32x4ShrS) \ - V(Arm64I32x4Add) \ - V(Arm64I32x4AddHoriz) \ - V(Arm64I32x4Sub) \ - V(Arm64I32x4Mul) \ - V(Arm64I32x4MinS) \ - V(Arm64I32x4MaxS) \ - V(Arm64I32x4Eq) \ - V(Arm64I32x4Ne) \ - V(Arm64I32x4GtS) \ - V(Arm64I32x4GeS) \ - V(Arm64I32x4UConvertF32x4) \ - V(Arm64I32x4UConvertI16x8Low) \ - V(Arm64I32x4UConvertI16x8High) \ - V(Arm64I32x4ShrU) \ - V(Arm64I32x4MinU) \ - V(Arm64I32x4MaxU) \ - V(Arm64I32x4GtU) \ - V(Arm64I32x4GeU) \ - V(Arm64I16x8Splat) \ - V(Arm64I16x8ExtractLane) \ - V(Arm64I16x8ReplaceLane) \ - V(Arm64I16x8SConvertI8x16Low) \ - V(Arm64I16x8SConvertI8x16High) \ - V(Arm64I16x8Neg) \ - V(Arm64I16x8Shl) \ - V(Arm64I16x8ShrS) \ - V(Arm64I16x8SConvertI32x4) \ - V(Arm64I16x8Add) \ - V(Arm64I16x8AddSaturateS) \ - V(Arm64I16x8AddHoriz) \ - V(Arm64I16x8Sub) \ - V(Arm64I16x8SubSaturateS) \ - V(Arm64I16x8Mul) \ - V(Arm64I16x8MinS) \ - V(Arm64I16x8MaxS) \ - V(Arm64I16x8Eq) \ - V(Arm64I16x8Ne) \ - V(Arm64I16x8GtS) \ - V(Arm64I16x8GeS) \ - V(Arm64I16x8UConvertI8x16Low) \ - V(Arm64I16x8UConvertI8x16High) \ - V(Arm64I16x8ShrU) \ - V(Arm64I16x8UConvertI32x4) \ - V(Arm64I16x8AddSaturateU) \ - V(Arm64I16x8SubSaturateU) \ - V(Arm64I16x8MinU) \ - V(Arm64I16x8MaxU) \ - V(Arm64I16x8GtU) \ - V(Arm64I16x8GeU) \ - V(Arm64I8x16Splat) \ - V(Arm64I8x16ExtractLane) \ - V(Arm64I8x16ReplaceLane) \ - V(Arm64I8x16Neg) \ - V(Arm64I8x16Shl) \ - V(Arm64I8x16ShrS) \ - V(Arm64I8x16SConvertI16x8) \ - V(Arm64I8x16Add) \ - V(Arm64I8x16AddSaturateS) \ - V(Arm64I8x16Sub) \ - V(Arm64I8x16SubSaturateS) \ - V(Arm64I8x16Mul) \ - V(Arm64I8x16MinS) \ - V(Arm64I8x16MaxS) \ - V(Arm64I8x16Eq) \ - V(Arm64I8x16Ne) \ - V(Arm64I8x16GtS) \ - V(Arm64I8x16GeS) \ - V(Arm64I8x16ShrU) \ - V(Arm64I8x16UConvertI16x8) \ - V(Arm64I8x16AddSaturateU) \ - V(Arm64I8x16SubSaturateU) \ - V(Arm64I8x16MinU) \ - V(Arm64I8x16MaxU) \ - V(Arm64I8x16GtU) \ - V(Arm64I8x16GeU) \ - V(Arm64S128Zero) \ - V(Arm64S128Dup) \ - V(Arm64S128And) \ - V(Arm64S128Or) \ - V(Arm64S128Xor) \ - V(Arm64S128Not) \ - V(Arm64S128Select) \ - V(Arm64S32x4ZipLeft) \ - V(Arm64S32x4ZipRight) \ - V(Arm64S32x4UnzipLeft) \ - V(Arm64S32x4UnzipRight) \ - V(Arm64S32x4TransposeLeft) \ - V(Arm64S32x4TransposeRight) \ - V(Arm64S32x4Shuffle) \ - V(Arm64S16x8ZipLeft) \ - V(Arm64S16x8ZipRight) \ - V(Arm64S16x8UnzipLeft) \ - V(Arm64S16x8UnzipRight) \ - V(Arm64S16x8TransposeLeft) \ - V(Arm64S16x8TransposeRight) \ - V(Arm64S8x16ZipLeft) \ - V(Arm64S8x16ZipRight) \ - V(Arm64S8x16UnzipLeft) \ - V(Arm64S8x16UnzipRight) \ - V(Arm64S8x16TransposeLeft) \ - V(Arm64S8x16TransposeRight) \ - V(Arm64S8x16Concat) \ - V(Arm64S8x16Shuffle) \ - V(Arm64S32x2Reverse) \ - V(Arm64S16x4Reverse) \ - V(Arm64S16x2Reverse) \ - V(Arm64S8x8Reverse) \ - V(Arm64S8x4Reverse) \ - V(Arm64S8x2Reverse) \ - V(Arm64S1x4AnyTrue) \ - V(Arm64S1x4AllTrue) \ - V(Arm64S1x8AnyTrue) \ - V(Arm64S1x8AllTrue) \ - V(Arm64S1x16AnyTrue) \ - V(Arm64S1x16AllTrue) \ - V(Arm64Word64AtomicAddUint8) \ - 
V(Arm64Word64AtomicAddUint16) \ - V(Arm64Word64AtomicAddUint32) \ - V(Arm64Word64AtomicAddUint64) \ - V(Arm64Word64AtomicSubUint8) \ - V(Arm64Word64AtomicSubUint16) \ - V(Arm64Word64AtomicSubUint32) \ - V(Arm64Word64AtomicSubUint64) \ - V(Arm64Word64AtomicAndUint8) \ - V(Arm64Word64AtomicAndUint16) \ - V(Arm64Word64AtomicAndUint32) \ - V(Arm64Word64AtomicAndUint64) \ - V(Arm64Word64AtomicOrUint8) \ - V(Arm64Word64AtomicOrUint16) \ - V(Arm64Word64AtomicOrUint32) \ - V(Arm64Word64AtomicOrUint64) \ - V(Arm64Word64AtomicXorUint8) \ - V(Arm64Word64AtomicXorUint16) \ - V(Arm64Word64AtomicXorUint32) \ - V(Arm64Word64AtomicXorUint64) +#define TARGET_ARCH_OPCODE_LIST(V) \ + V(Arm64Add) \ + V(Arm64Add32) \ + V(Arm64And) \ + V(Arm64And32) \ + V(Arm64Bic) \ + V(Arm64Bic32) \ + V(Arm64Clz) \ + V(Arm64Clz32) \ + V(Arm64Cmp) \ + V(Arm64Cmp32) \ + V(Arm64Cmn) \ + V(Arm64Cmn32) \ + V(Arm64Tst) \ + V(Arm64Tst32) \ + V(Arm64Or) \ + V(Arm64Or32) \ + V(Arm64Orn) \ + V(Arm64Orn32) \ + V(Arm64Eor) \ + V(Arm64Eor32) \ + V(Arm64Eon) \ + V(Arm64Eon32) \ + V(Arm64Sub) \ + V(Arm64Sub32) \ + V(Arm64Mul) \ + V(Arm64Mul32) \ + V(Arm64Smull) \ + V(Arm64Umull) \ + V(Arm64Madd) \ + V(Arm64Madd32) \ + V(Arm64Msub) \ + V(Arm64Msub32) \ + V(Arm64Mneg) \ + V(Arm64Mneg32) \ + V(Arm64Idiv) \ + V(Arm64Idiv32) \ + V(Arm64Udiv) \ + V(Arm64Udiv32) \ + V(Arm64Imod) \ + V(Arm64Imod32) \ + V(Arm64Umod) \ + V(Arm64Umod32) \ + V(Arm64Not) \ + V(Arm64Not32) \ + V(Arm64Lsl) \ + V(Arm64Lsl32) \ + V(Arm64Lsr) \ + V(Arm64Lsr32) \ + V(Arm64Asr) \ + V(Arm64Asr32) \ + V(Arm64Ror) \ + V(Arm64Ror32) \ + V(Arm64Mov32) \ + V(Arm64Sxtb32) \ + V(Arm64Sxth32) \ + V(Arm64Sxtb) \ + V(Arm64Sxth) \ + V(Arm64Sxtw) \ + V(Arm64Sbfx32) \ + V(Arm64Ubfx) \ + V(Arm64Ubfx32) \ + V(Arm64Ubfiz32) \ + V(Arm64Bfi) \ + V(Arm64Rbit) \ + V(Arm64Rbit32) \ + V(Arm64TestAndBranch32) \ + V(Arm64TestAndBranch) \ + V(Arm64CompareAndBranch32) \ + V(Arm64CompareAndBranch) \ + V(Arm64Claim) \ + V(Arm64Poke) \ + V(Arm64PokePair) \ + V(Arm64Peek) \ + V(Arm64Float32Cmp) \ + V(Arm64Float32Add) \ + V(Arm64Float32Sub) \ + V(Arm64Float32Mul) \ + V(Arm64Float32Div) \ + V(Arm64Float32Abs) \ + V(Arm64Float32Neg) \ + V(Arm64Float32Sqrt) \ + V(Arm64Float32RoundDown) \ + V(Arm64Float32Max) \ + V(Arm64Float32Min) \ + V(Arm64Float64Cmp) \ + V(Arm64Float64Add) \ + V(Arm64Float64Sub) \ + V(Arm64Float64Mul) \ + V(Arm64Float64Div) \ + V(Arm64Float64Mod) \ + V(Arm64Float64Max) \ + V(Arm64Float64Min) \ + V(Arm64Float64Abs) \ + V(Arm64Float64Neg) \ + V(Arm64Float64Sqrt) \ + V(Arm64Float64RoundDown) \ + V(Arm64Float32RoundUp) \ + V(Arm64Float64RoundUp) \ + V(Arm64Float64RoundTiesAway) \ + V(Arm64Float32RoundTruncate) \ + V(Arm64Float64RoundTruncate) \ + V(Arm64Float32RoundTiesEven) \ + V(Arm64Float64RoundTiesEven) \ + V(Arm64Float64SilenceNaN) \ + V(Arm64Float32ToFloat64) \ + V(Arm64Float64ToFloat32) \ + V(Arm64Float32ToInt32) \ + V(Arm64Float64ToInt32) \ + V(Arm64Float32ToUint32) \ + V(Arm64Float64ToUint32) \ + V(Arm64Float32ToInt64) \ + V(Arm64Float64ToInt64) \ + V(Arm64Float32ToUint64) \ + V(Arm64Float64ToUint64) \ + V(Arm64Int32ToFloat32) \ + V(Arm64Int32ToFloat64) \ + V(Arm64Int64ToFloat32) \ + V(Arm64Int64ToFloat64) \ + V(Arm64Uint32ToFloat32) \ + V(Arm64Uint32ToFloat64) \ + V(Arm64Uint64ToFloat32) \ + V(Arm64Uint64ToFloat64) \ + V(Arm64Float64ExtractLowWord32) \ + V(Arm64Float64ExtractHighWord32) \ + V(Arm64Float64InsertLowWord32) \ + V(Arm64Float64InsertHighWord32) \ + V(Arm64Float64MoveU64) \ + V(Arm64U64MoveFloat64) \ + V(Arm64LdrS) \ + V(Arm64StrS) \ + V(Arm64LdrD) \ + V(Arm64StrD) \ + 
V(Arm64LdrQ) \ + V(Arm64StrQ) \ + V(Arm64Ldrb) \ + V(Arm64Ldrsb) \ + V(Arm64Strb) \ + V(Arm64Ldrh) \ + V(Arm64Ldrsh) \ + V(Arm64Strh) \ + V(Arm64Ldrsw) \ + V(Arm64LdrW) \ + V(Arm64StrW) \ + V(Arm64Ldr) \ + V(Arm64Str) \ + V(Arm64DsbIsb) \ + V(Arm64F32x4Splat) \ + V(Arm64F32x4ExtractLane) \ + V(Arm64F32x4ReplaceLane) \ + V(Arm64F32x4SConvertI32x4) \ + V(Arm64F32x4UConvertI32x4) \ + V(Arm64F32x4Abs) \ + V(Arm64F32x4Neg) \ + V(Arm64F32x4RecipApprox) \ + V(Arm64F32x4RecipSqrtApprox) \ + V(Arm64F32x4Add) \ + V(Arm64F32x4AddHoriz) \ + V(Arm64F32x4Sub) \ + V(Arm64F32x4Mul) \ + V(Arm64F32x4Min) \ + V(Arm64F32x4Max) \ + V(Arm64F32x4Eq) \ + V(Arm64F32x4Ne) \ + V(Arm64F32x4Lt) \ + V(Arm64F32x4Le) \ + V(Arm64I32x4Splat) \ + V(Arm64I32x4ExtractLane) \ + V(Arm64I32x4ReplaceLane) \ + V(Arm64I32x4SConvertF32x4) \ + V(Arm64I32x4SConvertI16x8Low) \ + V(Arm64I32x4SConvertI16x8High) \ + V(Arm64I32x4Neg) \ + V(Arm64I32x4Shl) \ + V(Arm64I32x4ShrS) \ + V(Arm64I32x4Add) \ + V(Arm64I32x4AddHoriz) \ + V(Arm64I32x4Sub) \ + V(Arm64I32x4Mul) \ + V(Arm64I32x4MinS) \ + V(Arm64I32x4MaxS) \ + V(Arm64I32x4Eq) \ + V(Arm64I32x4Ne) \ + V(Arm64I32x4GtS) \ + V(Arm64I32x4GeS) \ + V(Arm64I32x4UConvertF32x4) \ + V(Arm64I32x4UConvertI16x8Low) \ + V(Arm64I32x4UConvertI16x8High) \ + V(Arm64I32x4ShrU) \ + V(Arm64I32x4MinU) \ + V(Arm64I32x4MaxU) \ + V(Arm64I32x4GtU) \ + V(Arm64I32x4GeU) \ + V(Arm64I16x8Splat) \ + V(Arm64I16x8ExtractLane) \ + V(Arm64I16x8ReplaceLane) \ + V(Arm64I16x8SConvertI8x16Low) \ + V(Arm64I16x8SConvertI8x16High) \ + V(Arm64I16x8Neg) \ + V(Arm64I16x8Shl) \ + V(Arm64I16x8ShrS) \ + V(Arm64I16x8SConvertI32x4) \ + V(Arm64I16x8Add) \ + V(Arm64I16x8AddSaturateS) \ + V(Arm64I16x8AddHoriz) \ + V(Arm64I16x8Sub) \ + V(Arm64I16x8SubSaturateS) \ + V(Arm64I16x8Mul) \ + V(Arm64I16x8MinS) \ + V(Arm64I16x8MaxS) \ + V(Arm64I16x8Eq) \ + V(Arm64I16x8Ne) \ + V(Arm64I16x8GtS) \ + V(Arm64I16x8GeS) \ + V(Arm64I16x8UConvertI8x16Low) \ + V(Arm64I16x8UConvertI8x16High) \ + V(Arm64I16x8ShrU) \ + V(Arm64I16x8UConvertI32x4) \ + V(Arm64I16x8AddSaturateU) \ + V(Arm64I16x8SubSaturateU) \ + V(Arm64I16x8MinU) \ + V(Arm64I16x8MaxU) \ + V(Arm64I16x8GtU) \ + V(Arm64I16x8GeU) \ + V(Arm64I8x16Splat) \ + V(Arm64I8x16ExtractLane) \ + V(Arm64I8x16ReplaceLane) \ + V(Arm64I8x16Neg) \ + V(Arm64I8x16Shl) \ + V(Arm64I8x16ShrS) \ + V(Arm64I8x16SConvertI16x8) \ + V(Arm64I8x16Add) \ + V(Arm64I8x16AddSaturateS) \ + V(Arm64I8x16Sub) \ + V(Arm64I8x16SubSaturateS) \ + V(Arm64I8x16Mul) \ + V(Arm64I8x16MinS) \ + V(Arm64I8x16MaxS) \ + V(Arm64I8x16Eq) \ + V(Arm64I8x16Ne) \ + V(Arm64I8x16GtS) \ + V(Arm64I8x16GeS) \ + V(Arm64I8x16ShrU) \ + V(Arm64I8x16UConvertI16x8) \ + V(Arm64I8x16AddSaturateU) \ + V(Arm64I8x16SubSaturateU) \ + V(Arm64I8x16MinU) \ + V(Arm64I8x16MaxU) \ + V(Arm64I8x16GtU) \ + V(Arm64I8x16GeU) \ + V(Arm64S128Zero) \ + V(Arm64S128Dup) \ + V(Arm64S128And) \ + V(Arm64S128Or) \ + V(Arm64S128Xor) \ + V(Arm64S128Not) \ + V(Arm64S128Select) \ + V(Arm64S32x4ZipLeft) \ + V(Arm64S32x4ZipRight) \ + V(Arm64S32x4UnzipLeft) \ + V(Arm64S32x4UnzipRight) \ + V(Arm64S32x4TransposeLeft) \ + V(Arm64S32x4TransposeRight) \ + V(Arm64S32x4Shuffle) \ + V(Arm64S16x8ZipLeft) \ + V(Arm64S16x8ZipRight) \ + V(Arm64S16x8UnzipLeft) \ + V(Arm64S16x8UnzipRight) \ + V(Arm64S16x8TransposeLeft) \ + V(Arm64S16x8TransposeRight) \ + V(Arm64S8x16ZipLeft) \ + V(Arm64S8x16ZipRight) \ + V(Arm64S8x16UnzipLeft) \ + V(Arm64S8x16UnzipRight) \ + V(Arm64S8x16TransposeLeft) \ + V(Arm64S8x16TransposeRight) \ + V(Arm64S8x16Concat) \ + V(Arm64S8x16Shuffle) \ + V(Arm64S32x2Reverse) \ + V(Arm64S16x4Reverse) \ + 
V(Arm64S16x2Reverse) \ + V(Arm64S8x8Reverse) \ + V(Arm64S8x4Reverse) \ + V(Arm64S8x2Reverse) \ + V(Arm64S1x4AnyTrue) \ + V(Arm64S1x4AllTrue) \ + V(Arm64S1x8AnyTrue) \ + V(Arm64S1x8AllTrue) \ + V(Arm64S1x16AnyTrue) \ + V(Arm64S1x16AllTrue) \ + V(Arm64Word64AtomicLoadUint8) \ + V(Arm64Word64AtomicLoadUint16) \ + V(Arm64Word64AtomicLoadUint32) \ + V(Arm64Word64AtomicLoadUint64) \ + V(Arm64Word64AtomicStoreWord8) \ + V(Arm64Word64AtomicStoreWord16) \ + V(Arm64Word64AtomicStoreWord32) \ + V(Arm64Word64AtomicStoreWord64) \ + V(Arm64Word64AtomicAddUint8) \ + V(Arm64Word64AtomicAddUint16) \ + V(Arm64Word64AtomicAddUint32) \ + V(Arm64Word64AtomicAddUint64) \ + V(Arm64Word64AtomicSubUint8) \ + V(Arm64Word64AtomicSubUint16) \ + V(Arm64Word64AtomicSubUint32) \ + V(Arm64Word64AtomicSubUint64) \ + V(Arm64Word64AtomicAndUint8) \ + V(Arm64Word64AtomicAndUint16) \ + V(Arm64Word64AtomicAndUint32) \ + V(Arm64Word64AtomicAndUint64) \ + V(Arm64Word64AtomicOrUint8) \ + V(Arm64Word64AtomicOrUint16) \ + V(Arm64Word64AtomicOrUint32) \ + V(Arm64Word64AtomicOrUint64) \ + V(Arm64Word64AtomicXorUint8) \ + V(Arm64Word64AtomicXorUint16) \ + V(Arm64Word64AtomicXorUint32) \ + V(Arm64Word64AtomicXorUint64) \ + V(Arm64Word64AtomicExchangeUint8) \ + V(Arm64Word64AtomicExchangeUint16) \ + V(Arm64Word64AtomicExchangeUint32) \ + V(Arm64Word64AtomicExchangeUint64) \ + V(Arm64Word64AtomicCompareExchangeUint8) \ + V(Arm64Word64AtomicCompareExchangeUint16) \ + V(Arm64Word64AtomicCompareExchangeUint32) \ + V(Arm64Word64AtomicCompareExchangeUint64) // Addressing modes represent the "shape" of inputs to an instruction. // Many instructions support multiple addressing modes. Addressing modes diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc index 289fe47e09a1..4ea251c590e2 100644 --- a/src/compiler/arm64/instruction-scheduler-arm64.cc +++ b/src/compiler/arm64/instruction-scheduler-arm64.cc @@ -309,6 +309,16 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64DsbIsb: return kHasSideEffect; + case kArm64Word64AtomicLoadUint8: + case kArm64Word64AtomicLoadUint16: + case kArm64Word64AtomicLoadUint32: + case kArm64Word64AtomicLoadUint64: + return kIsLoadOperation; + + case kArm64Word64AtomicStoreWord8: + case kArm64Word64AtomicStoreWord16: + case kArm64Word64AtomicStoreWord32: + case kArm64Word64AtomicStoreWord64: case kArm64Word64AtomicAddUint8: case kArm64Word64AtomicAddUint16: case kArm64Word64AtomicAddUint32: @@ -329,6 +339,14 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64Word64AtomicXorUint16: case kArm64Word64AtomicXorUint32: case kArm64Word64AtomicXorUint64: + case kArm64Word64AtomicExchangeUint8: + case kArm64Word64AtomicExchangeUint16: + case kArm64Word64AtomicExchangeUint32: + case kArm64Word64AtomicExchangeUint64: + case kArm64Word64AtomicCompareExchangeUint8: + case kArm64Word64AtomicCompareExchangeUint16: + case kArm64Word64AtomicCompareExchangeUint32: + case kArm64Word64AtomicCompareExchangeUint64: return kHasSideEffect; #define CASE(Name) case k##Name: diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc index 08538cc4e2b1..c3979a4aec88 100644 --- a/src/compiler/arm64/instruction-selector-arm64.cc +++ b/src/compiler/arm64/instruction-selector-arm64.cc @@ -2102,6 +2102,76 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node, } } +void VisitAtomicExchange(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + Arm64OperandGenerator 
g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseRegister(base); + inputs[input_count++] = g.UseRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionOperand outputs[1]; + outputs[0] = g.DefineAsRegister(node); + InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; + InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR); + selector->Emit(code, 1, outputs, input_count, inputs, arraysize(temps), + temps); +} + +void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + Arm64OperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* old_value = node->InputAt(2); + Node* new_value = node->InputAt(3); + InstructionOperand inputs[4]; + size_t input_count = 0; + inputs[input_count++] = g.UseRegister(base); + inputs[input_count++] = g.UseRegister(index); + inputs[input_count++] = g.UseUniqueRegister(old_value); + inputs[input_count++] = g.UseUniqueRegister(new_value); + InstructionOperand outputs[1]; + outputs[0] = g.DefineAsRegister(node); + InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; + InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR); + selector->Emit(code, 1, outputs, input_count, inputs, arraysize(temps), + temps); +} + +void VisitAtomicLoad(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + Arm64OperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)}; + InstructionOperand outputs[] = {g.DefineAsRegister(node)}; + InstructionOperand temps[] = {g.TempRegister()}; + InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR); + selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, + arraysize(temps), temps); +} + +void VisitAtomicStore(InstructionSelector* selector, Node* node, + ArchOpcode opcode) { + Arm64OperandGenerator g(selector); + Node* base = node->InputAt(0); + Node* index = node->InputAt(1); + Node* value = node->InputAt(2); + InstructionOperand inputs[3]; + size_t input_count = 0; + inputs[input_count++] = g.UseRegister(base); + inputs[input_count++] = g.UseRegister(index); + inputs[input_count++] = g.UseUniqueRegister(value); + InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR); + InstructionOperand temps[] = {g.TempRegister()}; + selector->Emit(code, 0, nullptr, input_count, inputs, arraysize(temps), + temps); +} + } // namespace void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, @@ -2544,9 +2614,6 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { void InstructionSelector::VisitWord32AtomicLoad(Node* node) { LoadRepresentation load_rep = LoadRepresentationOf(node->op()); - Arm64OperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); ArchOpcode opcode = kArchNop; switch (load_rep.representation()) { case MachineRepresentation::kWord8: @@ -2564,20 +2631,34 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) { UNREACHABLE(); return; } - InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)}; - InstructionOperand outputs[] = {g.DefineAsRegister(node)}; - InstructionOperand temps[] = {g.TempRegister()}; - InstructionCode code = opcode | 
AddressingModeField::encode(kMode_MRR); - Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, - arraysize(temps), temps); + VisitAtomicLoad(this, node, opcode); +} + +void InstructionSelector::VisitWord64AtomicLoad(Node* node) { + LoadRepresentation load_rep = LoadRepresentationOf(node->op()); + ArchOpcode opcode = kArchNop; + switch (load_rep.representation()) { + case MachineRepresentation::kWord8: + opcode = kArm64Word64AtomicLoadUint8; + break; + case MachineRepresentation::kWord16: + opcode = kArm64Word64AtomicLoadUint16; + break; + case MachineRepresentation::kWord32: + opcode = kArm64Word64AtomicLoadUint32; + break; + case MachineRepresentation::kWord64: + opcode = kArm64Word64AtomicLoadUint64; + break; + default: + UNREACHABLE(); + return; + } + VisitAtomicLoad(this, node, opcode); } void InstructionSelector::VisitWord32AtomicStore(Node* node) { MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); - Arm64OperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); ArchOpcode opcode = kArchNop; switch (rep) { case MachineRepresentation::kWord8: @@ -2593,23 +2674,33 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) { UNREACHABLE(); return; } + VisitAtomicStore(this, node, opcode); +} - AddressingMode addressing_mode = kMode_MRR; - InstructionOperand inputs[3]; - size_t input_count = 0; - inputs[input_count++] = g.UseUniqueRegister(base); - inputs[input_count++] = g.UseUniqueRegister(index); - inputs[input_count++] = g.UseUniqueRegister(value); - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - InstructionOperand temps[] = {g.TempRegister()}; - Emit(code, 0, nullptr, input_count, inputs, arraysize(temps), temps); +void InstructionSelector::VisitWord64AtomicStore(Node* node) { + MachineRepresentation rep = AtomicStoreRepresentationOf(node->op()); + ArchOpcode opcode = kArchNop; + switch (rep) { + case MachineRepresentation::kWord8: + opcode = kArm64Word64AtomicStoreWord8; + break; + case MachineRepresentation::kWord16: + opcode = kArm64Word64AtomicStoreWord16; + break; + case MachineRepresentation::kWord32: + opcode = kArm64Word64AtomicStoreWord32; + break; + case MachineRepresentation::kWord64: + opcode = kArm64Word64AtomicStoreWord64; + break; + default: + UNREACHABLE(); + return; + } + VisitAtomicStore(this, node, opcode); } void InstructionSelector::VisitWord32AtomicExchange(Node* node) { - Arm64OperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); ArchOpcode opcode = kArchNop; MachineType type = AtomicOpRepresentationOf(node->op()); if (type == MachineType::Int8()) { @@ -2626,26 +2717,28 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) { UNREACHABLE(); return; } + VisitAtomicExchange(this, node, opcode); +} - AddressingMode addressing_mode = kMode_MRR; - InstructionOperand inputs[3]; - size_t input_count = 0; - inputs[input_count++] = g.UseRegister(base); - inputs[input_count++] = g.UseRegister(index); - inputs[input_count++] = g.UseUniqueRegister(value); - InstructionOperand outputs[1]; - outputs[0] = g.DefineAsRegister(node); - InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps); +void InstructionSelector::VisitWord64AtomicExchange(Node* node) { + ArchOpcode opcode = kArchNop; + 
MachineType type = AtomicOpRepresentationOf(node->op()); + if (type == MachineType::Uint8()) { + opcode = kArm64Word64AtomicExchangeUint8; + } else if (type == MachineType::Uint16()) { + opcode = kArm64Word64AtomicExchangeUint16; + } else if (type == MachineType::Uint32()) { + opcode = kArm64Word64AtomicExchangeUint32; + } else if (type == MachineType::Uint64()) { + opcode = kArm64Word64AtomicExchangeUint64; + } else { + UNREACHABLE(); + return; + } + VisitAtomicExchange(this, node, opcode); } void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { - Arm64OperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* old_value = node->InputAt(2); - Node* new_value = node->InputAt(3); ArchOpcode opcode = kArchNop; MachineType type = AtomicOpRepresentationOf(node->op()); if (type == MachineType::Int8()) { @@ -2662,19 +2755,25 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { UNREACHABLE(); return; } + VisitAtomicCompareExchange(this, node, opcode); +} - AddressingMode addressing_mode = kMode_MRR; - InstructionOperand inputs[4]; - size_t input_count = 0; - inputs[input_count++] = g.UseRegister(base); - inputs[input_count++] = g.UseRegister(index); - inputs[input_count++] = g.UseUniqueRegister(old_value); - inputs[input_count++] = g.UseUniqueRegister(new_value); - InstructionOperand outputs[1]; - outputs[0] = g.DefineAsRegister(node); - InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps); +void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { + ArchOpcode opcode = kArchNop; + MachineType type = AtomicOpRepresentationOf(node->op()); + if (type == MachineType::Uint8()) { + opcode = kArm64Word64AtomicCompareExchangeUint8; + } else if (type == MachineType::Uint16()) { + opcode = kArm64Word64AtomicCompareExchangeUint16; + } else if (type == MachineType::Uint32()) { + opcode = kArm64Word64AtomicCompareExchangeUint32; + } else if (type == MachineType::Uint64()) { + opcode = kArm64Word64AtomicCompareExchangeUint64; + } else { + UNREACHABLE(); + return; + } + VisitAtomicCompareExchange(this, node, opcode); } void InstructionSelector::VisitAtomicBinaryOperation( diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc index 6fce9ea4707b..0ca0932ce844 100644 --- a/src/compiler/instruction-selector.cc +++ b/src/compiler/instruction-selector.cc @@ -2317,15 +2317,13 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) { #endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32 -#if !V8_TARGET_ARCH_X64 +#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicStore(Node* node) { UNIMPLEMENTED(); } -#endif // !V8_TARGET_ARCH_X64 -#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); } @@ -2335,9 +2333,7 @@ void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); } -#endif // 
!V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 -#if !V8_TARGET_ARCH_X64 void InstructionSelector::VisitWord64AtomicExchange(Node* node) { UNIMPLEMENTED(); } @@ -2345,7 +2341,7 @@ void InstructionSelector::VisitWord64AtomicExchange(Node* node) { void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) { UNIMPLEMENTED(); } -#endif // !V8_TARGET_ARCH_X64 +#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \ !V8_TARGET_ARCH_MIPS64 diff --git a/test/cctest/wasm/test-run-wasm-atomics64.cc b/test/cctest/wasm/test-run-wasm-atomics64.cc index c9f24dccc2dd..3ddaaec47aca 100644 --- a/test/cctest/wasm/test-run-wasm-atomics64.cc +++ b/test/cctest/wasm/test-run-wasm-atomics64.cc @@ -46,11 +46,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr) { WASM_COMPILED_EXEC_TEST(I64AtomicXor) { RunU64BinOp(execution_mode, kExprI64AtomicXor, Xor); } -#if V8_TARGET_ARCH_X64 WASM_COMPILED_EXEC_TEST(I64AtomicExchange) { RunU64BinOp(execution_mode, kExprI64AtomicExchange, Exchange); } -#endif // V8_TARGET_ARCH_X64 void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op, Uint32BinOp expected_op) { @@ -88,11 +86,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr32U) { WASM_COMPILED_EXEC_TEST(I64AtomicXor32U) { RunU32BinOp(execution_mode, kExprI64AtomicXor32U, Xor); } -#if V8_TARGET_ARCH_X64 WASM_COMPILED_EXEC_TEST(I64AtomicExchange32U) { RunU32BinOp(execution_mode, kExprI64AtomicExchange32U, Exchange); } -#endif // V8_TARGET_ARCH_X64 void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op, Uint16BinOp expected_op) { @@ -130,11 +126,9 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr16U) { WASM_COMPILED_EXEC_TEST(I64AtomicXor16U) { RunU16BinOp(execution_mode, kExprI64AtomicXor16U, Xor); } -#if V8_TARGET_ARCH_X64 WASM_COMPILED_EXEC_TEST(I64AtomicExchange16U) { RunU16BinOp(execution_mode, kExprI64AtomicExchange16U, Exchange); } -#endif // V8_TARGET_ARCH_X64 void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op, Uint8BinOp expected_op) { @@ -172,8 +166,6 @@ WASM_COMPILED_EXEC_TEST(I64AtomicOr8U) { WASM_COMPILED_EXEC_TEST(I64AtomicXor8U) { RunU8BinOp(execution_mode, kExprI64AtomicXor8U, Xor); } - -#if V8_TARGET_ARCH_X64 WASM_COMPILED_EXEC_TEST(I64AtomicExchange8U) { RunU8BinOp(execution_mode, kExprI64AtomicExchange8U, Exchange); } @@ -397,7 +389,6 @@ WASM_COMPILED_EXEC_TEST(I64AtomicStoreLoad8U) { CHECK_EQ(*i, r.builder().ReadMemory(&memory[0])); } } -#endif // V8_TARGET_ARCH_X64 } // namespace test_run_wasm_atomics_64 } // namespace wasm
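Note on the register-width parameterization above: the reworked ASSEMBLE_ATOMIC_* macros take an extra reg argument (Register or Register32) and token-paste it onto the operand accessors, so a single macro body now serves both the Word32 and the new Word64 opcode families. As a rough illustration (an informal expansion, not exact preprocessor output), ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register), as used for kArm64Word64AtomicExchangeUint64, emits:

  // Informal expansion; the real macro keeps the do/while(0) wrapper and
  // the line-continuation backslashes.
  Label exchange;
  __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));  // temp0 = base + index
  __ Bind(&exchange);
  __ ldaxr(i.OutputRegister(), i.TempRegister(0));           // load-acquire exclusive into the 64-bit X register
  __ stlxr(i.TempRegister32(1), i.InputRegister(2),          // store-release exclusive; w-temp1 receives the status
           i.TempRegister(0));
  __ Cbnz(i.TempRegister32(1), &exchange);                   // nonzero status: exclusive store failed, retry

With reg = Register32 the same body expands to the W-register accessors (OutputRegister32(), InputRegister32(2)), which is what the Word32 cases and the narrow Uint8/16/32 Word64 cases use; since writes to a W register zero-extend into the containing X register, the narrow Word64 variants need no additional extension step.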
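For readers less familiar with ARM64 exclusives: every sequence generated by these macros is a load-linked/store-conditional retry loop, so the read-modify-write is atomic even though it spans several instructions. A minimal, self-contained C++ sketch of the semantics the kArm64Word64AtomicAddUint64 loop implements (illustrative only; it uses std::atomic instead of the emitted ldaxr/stlxr pair, and AtomicAdd64 is a made-up name, not V8 code):

  #include <atomic>
  #include <cstdint>

  // Mirrors the generated loop: load the current value, compute old + value,
  // try to store it back, and start over if another core touched the word.
  uint64_t AtomicAdd64(std::atomic<uint64_t>* addr, uint64_t value) {
    uint64_t old = addr->load(std::memory_order_acquire);
    // compare_exchange_weak may fail spuriously, like a failed stlxr; on
    // failure it reloads 'old' with the value currently in memory.
    while (!addr->compare_exchange_weak(old, old + value,
                                        std::memory_order_acq_rel,
                                        std::memory_order_acquire)) {
      // retry with the freshly observed value
    }
    return old;  // like the wasm RMW ops, the result is the value before the add
  }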
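The compare-exchange macro is the one with non-obvious control flow: it takes an early B(ne, &exit) when the loaded value does not match the expected input, and only loops back on a failed exclusive store. Its observable behaviour for kArm64Word64AtomicCompareExchangeUint64 corresponds to a strong CAS that always returns the value found in memory; a hedged C++ equivalent (CompareExchange64 is an illustrative name, not V8 code):

  #include <atomic>
  #include <cstdint>

  // Returns the value observed at addr; new_value is stored only if that
  // value equals expected. This matches the macro: on a mismatch it takes
  // the B(ne, &exit) branch with the loaded value already in the output
  // register, and on an stlxr failure it retries the whole loop.
  uint64_t CompareExchange64(std::atomic<uint64_t>* addr, uint64_t expected,
                             uint64_t new_value) {
    uint64_t observed = expected;
    addr->compare_exchange_strong(observed, new_value,
                                  std::memory_order_acq_rel,
                                  std::memory_order_acquire);
    return observed;  // on failure, compare_exchange_strong wrote the observed value here
  }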