Merge pull request #2287 from Sonicadvance1/rename_getreg
Arm64: Rename GetSrcPair, GetDst, and GetSrc
lioncash authored Dec 22, 2022
2 parents 82adc2f + 37a9588 commit bf7d0f7
Showing 11 changed files with 336 additions and 349 deletions.
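
The rename is mechanical: vector-register accessors previously called GetSrc and GetDst become GetVReg, GetSrcPair becomes GetRegPair, and the general-purpose accessor GetReg keeps its name. As orientation before the diff, here is a minimal, self-contained sketch of the accessor shapes implied by the call sites below; the stub types, parameter types, and empty bodies are placeholders, not the actual FEXCore implementation.

// Hypothetical sketch only -- inferred from call sites such as
// GetVReg(Node), GetReg<RA_32>(Node), and GetRegPair<RA_64>(Node).
// Stub structs stand in for vixl::aarch64 register handles.
#include <cstdint>
#include <utility>

struct Register  {};   // placeholder for a general-purpose register handle
struct VRegister {};   // placeholder for a vector/FP register handle

struct Arm64JITSketch {
  // Previously GetSrc(...) / GetDst(...): the vector register allocated
  // for an IR node or operand.
  VRegister GetVReg(uint32_t Node) const { return {}; }

  // Unchanged name: the GPR allocated for a node, sized by RAType.
  template <typename RAType>
  Register GetReg(uint32_t Node) const { return {}; }

  // Previously GetSrcPair(...): the GPR pair allocated for a node.
  template <typename RAType>
  std::pair<Register, Register> GetRegPair(uint32_t Node) const { return {}; }
};
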
20 changes: 10 additions & 10 deletions External/FEXCore/Source/Interface/Core/JIT/Arm64/ALUOps.cpp
@@ -20,8 +20,8 @@ DEF_OP(TruncElementPair) {

switch (IROp->Size) {
case 4: {
auto Dst = GetSrcPair<RA_32>(Node);
auto Src = GetSrcPair<RA_32>(Op->Pair.ID());
auto Dst = GetRegPair<RA_32>(Node);
auto Src = GetRegPair<RA_32>(Op->Pair.ID());
mov(Dst.first, Src.first);
mov(Dst.second, Src.second);
break;
@@ -1105,7 +1105,7 @@ DEF_OP(Sbfe) {

#define GRCMP(Node) (Op->CompareSize == 4 ? GetReg<RA_32>(Node) : GetReg<RA_64>(Node))

#define GRFCMP(Node) (Op->CompareSize == 4 ? GetDst(Node).S() : GetDst(Node).D())
#define GRFCMP(Node) (Op->CompareSize == 4 ? GetVReg(Node).S() : GetVReg(Node).D())

Condition MapSelectCC(IR::CondClassType Cond) {
switch (Cond.Val) {
@@ -1178,7 +1178,7 @@ DEF_OP(VExtractToGPR) {
const auto Offset = ElementSizeBits * Op->Index;
const auto Is256Bit = Offset >= SSERegBitSize;

const auto Vector = GetSrc(Op->Vector.ID());
const auto Vector = GetVReg(Op->Vector.ID());

const auto PerformMove = [&](const aarch64::VRegister& reg, int index) {
switch (OpSize) {
@@ -1249,10 +1249,10 @@ DEF_OP(Float_ToGPR_ZS) {
aarch64::Register Dst{};
aarch64::VRegister Src{};
if (Op->SrcElementSize == 8) {
Src = GetSrc(Op->Scalar.ID()).D();
Src = GetVReg(Op->Scalar.ID()).D();
}
else {
Src = GetSrc(Op->Scalar.ID()).S();
Src = GetVReg(Op->Scalar.ID()).S();
}

if (IROp->Size == 8) {
@@ -1271,11 +1271,11 @@ DEF_OP(Float_ToGPR_S) {
aarch64::Register Dst{};
aarch64::VRegister Src{};
if (Op->SrcElementSize == 8) {
frinti(VTMP1.D(), GetSrc(Op->Scalar.ID()).D());
frinti(VTMP1.D(), GetVReg(Op->Scalar.ID()).D());
Src = VTMP1.D();
}
else {
frinti(VTMP1.S(), GetSrc(Op->Scalar.ID()).S());
frinti(VTMP1.S(), GetVReg(Op->Scalar.ID()).S());
Src = VTMP1.S();
}

@@ -1293,10 +1293,10 @@ DEF_OP(FCmp) {
auto Op = IROp->C<IR::IROp_FCmp>();

if (Op->ElementSize == 4) {
fcmp(GetSrc(Op->Scalar1.ID()).S(), GetSrc(Op->Scalar2.ID()).S());
fcmp(GetVReg(Op->Scalar1.ID()).S(), GetVReg(Op->Scalar2.ID()).S());
}
else {
fcmp(GetSrc(Op->Scalar1.ID()).D(), GetSrc(Op->Scalar2.ID()).D());
fcmp(GetVReg(Op->Scalar1.ID()).D(), GetVReg(Op->Scalar2.ID()).D());
}
auto Dst = GetReg<RA_64>(Node);

@@ -14,9 +14,9 @@ using namespace vixl::aarch64;
DEF_OP(CASPair) {
auto Op = IROp->C<IR::IROp_CASPair>();
// Size is the size of each pair element
auto Dst = GetSrcPair<RA_64>(Node);
auto Expected = GetSrcPair<RA_64>(Op->Expected.ID());
auto Desired = GetSrcPair<RA_64>(Op->Desired.ID());
auto Dst = GetRegPair<RA_64>(Node);
auto Expected = GetRegPair<RA_64>(Op->Expected.ID());
auto Desired = GetRegPair<RA_64>(Op->Desired.ID());
auto MemSrc = GetReg<RA_64>(Op->Addr.ID());

if (CTX->HostFeatures.SupportsAtomics) {
@@ -104,7 +104,7 @@ DEF_OP(Jump) {
}

#define GRCMP(Node) (Op->CompareSize == 4 ? GetReg<RA_32>(Node) : GetReg<RA_64>(Node))
#define GRFCMP(Node) (Op->CompareSize == 4 ? GetDst(Node).S() : GetDst(Node).D())
#define GRFCMP(Node) (Op->CompareSize == 4 ? GetVReg(Node).S() : GetVReg(Node).D())

static Condition MapBranchCC(IR::CondClassType Cond) {
switch (Cond.Val) {
@@ -486,7 +486,7 @@ DEF_OP(CPUID) {

// Results are in x0, x1
// Results want to be in a i64v2 vector
auto Dst = GetSrcPair<RA_64>(Node);
auto Dst = GetRegPair<RA_64>(Node);
mov(Dst.first, x0);
mov(Dst.second, x1);
}
44 changes: 22 additions & 22 deletions External/FEXCore/Source/Interface/Core/JIT/Arm64/ConversionOps.cpp
@@ -19,8 +19,8 @@ DEF_OP(VInsGPR) {
const auto ElementSize = Op->Header.ElementSize;
const auto Is256Bit = OpSize == Core::CPUState::XMM_AVX_REG_SIZE;

const auto Dst = GetDst(Node);
const auto DestVector = GetSrc(Op->DestVector.ID());
const auto Dst = GetVReg(Node);
const auto DestVector = GetVReg(Op->DestVector.ID());

if (HostSupportsSVE && Is256Bit) {
const auto ElementSizeBits = ElementSize * 8;
@@ -129,17 +129,17 @@ DEF_OP(VCastFromGPR) {
switch (Op->Header.ElementSize) {
case 1:
uxtb(TMP1.W(), GetReg<RA_32>(Op->Src.ID()));
fmov(GetDst(Node).S(), TMP1.W());
fmov(GetVReg(Node).S(), TMP1.W());
break;
case 2:
uxth(TMP1.W(), GetReg<RA_32>(Op->Src.ID()));
fmov(GetDst(Node).S(), TMP1.W());
fmov(GetVReg(Node).S(), TMP1.W());
break;
case 4:
fmov(GetDst(Node).S(), GetReg<RA_32>(Op->Src.ID()).W());
fmov(GetVReg(Node).S(), GetReg<RA_32>(Op->Src.ID()).W());
break;
case 8:
fmov(GetDst(Node).D(), GetReg<RA_64>(Op->Src.ID()).X());
fmov(GetVReg(Node).D(), GetReg<RA_64>(Op->Src.ID()).X());
break;
default: LOGMAN_MSG_A_FMT("Unknown castGPR element size: {}", Op->Header.ElementSize);
}
@@ -153,19 +153,19 @@ DEF_OP(Float_FromGPR_S) {

switch (Conv) {
case 0x0404: { // Float <- int32_t
scvtf(GetDst(Node).S(), GetReg<RA_32>(Op->Src.ID()));
scvtf(GetVReg(Node).S(), GetReg<RA_32>(Op->Src.ID()));
break;
}
case 0x0408: { // Float <- int64_t
scvtf(GetDst(Node).S(), GetReg<RA_64>(Op->Src.ID()));
scvtf(GetVReg(Node).S(), GetReg<RA_64>(Op->Src.ID()));
break;
}
case 0x0804: { // Double <- int32_t
scvtf(GetDst(Node).D(), GetReg<RA_32>(Op->Src.ID()));
scvtf(GetVReg(Node).D(), GetReg<RA_32>(Op->Src.ID()));
break;
}
case 0x0808: { // Double <- int64_t
scvtf(GetDst(Node).D(), GetReg<RA_64>(Op->Src.ID()));
scvtf(GetVReg(Node).D(), GetReg<RA_64>(Op->Src.ID()));
break;
}
default:
@@ -180,11 +180,11 @@ DEF_OP(Float_FToF) {
const uint16_t Conv = (Op->Header.ElementSize << 8) | Op->SrcElementSize;
switch (Conv) {
case 0x0804: { // Double <- Float
fcvt(GetDst(Node).D(), GetSrc(Op->Scalar.ID()).S());
fcvt(GetVReg(Node).D(), GetVReg(Op->Scalar.ID()).S());
break;
}
case 0x0408: { // Float <- Double
fcvt(GetDst(Node).S(), GetSrc(Op->Scalar.ID()).D());
fcvt(GetVReg(Node).S(), GetVReg(Op->Scalar.ID()).D());
break;
}
default: LOGMAN_MSG_A_FMT("Unknown FCVT sizes: 0x{:x}", Conv);
@@ -198,8 +198,8 @@ DEF_OP(Vector_SToF) {
const auto ElementSize = Op->Header.ElementSize;
const auto Is256Bit = OpSize == Core::CPUState::XMM_AVX_REG_SIZE;

const auto Dst = GetDst(Node);
const auto Vector = GetSrc(Op->Vector.ID());
const auto Dst = GetVReg(Node);
const auto Vector = GetVReg(Op->Vector.ID());

if (HostSupportsSVE && Is256Bit) {
const auto Mask = PRED_TMP_32B.Merging();
@@ -243,8 +243,8 @@ DEF_OP(Vector_FToZS) {
const auto ElementSize = Op->Header.ElementSize;
const auto Is256Bit = OpSize == Core::CPUState::XMM_AVX_REG_SIZE;

const auto Dst = GetDst(Node);
const auto Vector = GetSrc(Op->Vector.ID());
const auto Dst = GetVReg(Node);
const auto Vector = GetVReg(Op->Vector.ID());

if (HostSupportsSVE && Is256Bit) {
const auto Mask = PRED_TMP_32B.Merging();
@@ -288,8 +288,8 @@ DEF_OP(Vector_FToS) {
const auto ElementSize = Op->Header.ElementSize;
const auto Is256Bit = OpSize == Core::CPUState::XMM_AVX_REG_SIZE;

const auto Dst = GetDst(Node);
const auto Vector = GetSrc(Op->Vector.ID());
const auto Dst = GetVReg(Node);
const auto Vector = GetVReg(Op->Vector.ID());

if (HostSupportsSVE && Is256Bit) {
const auto Mask = PRED_TMP_32B.Merging();
@@ -340,8 +340,8 @@ DEF_OP(Vector_FToF) {
const auto Is256Bit = OpSize == Core::CPUState::XMM_AVX_REG_SIZE;
const auto Conv = (ElementSize << 8) | Op->SrcElementSize;

const auto Dst = GetDst(Node);
const auto Vector = GetSrc(Op->Vector.ID());
const auto Dst = GetVReg(Node);
const auto Vector = GetVReg(Op->Vector.ID());

if (HostSupportsSVE && Is256Bit) {
// Curiously, FCVTLT and FCVTNT have no bottom variants,
@@ -416,8 +416,8 @@ DEF_OP(Vector_FToI) {
const auto ElementSize = Op->Header.ElementSize;
const auto Is256Bit = OpSize == Core::CPUState::XMM_AVX_REG_SIZE;

const auto Dst = GetDst(Node);
const auto Vector = GetSrc(Op->Vector.ID());
const auto Dst = GetVReg(Node);
const auto Vector = GetVReg(Op->Vector.ID());

if (HostSupportsSVE && Is256Bit) {
const auto Mask = PRED_TMP_32B.Merging();
30 changes: 15 additions & 15 deletions External/FEXCore/Source/Interface/Core/JIT/Arm64/EncryptionOps.cpp
@@ -14,41 +14,41 @@ using namespace vixl::aarch64;

DEF_OP(AESImc) {
auto Op = IROp->C<IR::IROp_VAESImc>();
aesimc(GetDst(Node).V16B(), GetSrc(Op->Vector.ID()).V16B());
aesimc(GetVReg(Node).V16B(), GetVReg(Op->Vector.ID()).V16B());
}

DEF_OP(AESEnc) {
auto Op = IROp->C<IR::IROp_VAESEnc>();
eor(VTMP2.V16B(), VTMP2.V16B(), VTMP2.V16B());
mov(VTMP1.V16B(), GetSrc(Op->State.ID()).V16B());
mov(VTMP1.V16B(), GetVReg(Op->State.ID()).V16B());
aese(VTMP1.V16B(), VTMP2.V16B());
aesmc(VTMP1.V16B(), VTMP1.V16B());
eor(GetDst(Node).V16B(), VTMP1.V16B(), GetSrc(Op->Key.ID()).V16B());
eor(GetVReg(Node).V16B(), VTMP1.V16B(), GetVReg(Op->Key.ID()).V16B());
}

DEF_OP(AESEncLast) {
auto Op = IROp->C<IR::IROp_VAESEncLast>();
eor(VTMP2.V16B(), VTMP2.V16B(), VTMP2.V16B());
mov(VTMP1.V16B(), GetSrc(Op->State.ID()).V16B());
mov(VTMP1.V16B(), GetVReg(Op->State.ID()).V16B());
aese(VTMP1.V16B(), VTMP2.V16B());
eor(GetDst(Node).V16B(), VTMP1.V16B(), GetSrc(Op->Key.ID()).V16B());
eor(GetVReg(Node).V16B(), VTMP1.V16B(), GetVReg(Op->Key.ID()).V16B());
}

DEF_OP(AESDec) {
auto Op = IROp->C<IR::IROp_VAESDec>();
eor(VTMP2.V16B(), VTMP2.V16B(), VTMP2.V16B());
mov(VTMP1.V16B(), GetSrc(Op->State.ID()).V16B());
mov(VTMP1.V16B(), GetVReg(Op->State.ID()).V16B());
aesd(VTMP1.V16B(), VTMP2.V16B());
aesimc(VTMP1.V16B(), VTMP1.V16B());
eor(GetDst(Node).V16B(), VTMP1.V16B(), GetSrc(Op->Key.ID()).V16B());
eor(GetVReg(Node).V16B(), VTMP1.V16B(), GetVReg(Op->Key.ID()).V16B());
}

DEF_OP(AESDecLast) {
auto Op = IROp->C<IR::IROp_VAESDecLast>();
eor(VTMP2.V16B(), VTMP2.V16B(), VTMP2.V16B());
mov(VTMP1.V16B(), GetSrc(Op->State.ID()).V16B());
mov(VTMP1.V16B(), GetVReg(Op->State.ID()).V16B());
aesd(VTMP1.V16B(), VTMP2.V16B());
eor(GetDst(Node).V16B(), VTMP1.V16B(), GetSrc(Op->Key.ID()).V16B());
eor(GetVReg(Node).V16B(), VTMP1.V16B(), GetVReg(Op->Key.ID()).V16B());
}

DEF_OP(AESKeyGenAssist) {
@@ -59,7 +59,7 @@ DEF_OP(AESKeyGenAssist) {

// Do a "regular" AESE step
eor(VTMP2.V16B(), VTMP2.V16B(), VTMP2.V16B());
mov(VTMP1.V16B(), GetSrc(Op->Src.ID()).V16B());
mov(VTMP1.V16B(), GetVReg(Op->Src.ID()).V16B());
aese(VTMP1.V16B(), VTMP2.V16B());

// Do a table shuffle to undo ShiftRows
@@ -71,10 +71,10 @@

LoadConstant(TMP1, static_cast<uint64_t>(Op->RCON) << 32);
dup(VTMP2.V2D(), TMP1);
eor(GetDst(Node).V16B(), VTMP1.V16B(), VTMP2.V16B());
eor(GetVReg(Node).V16B(), VTMP1.V16B(), VTMP2.V16B());
}
else {
tbl(GetDst(Node).V16B(), VTMP1.V16B(), VTMP3.V16B());
tbl(GetVReg(Node).V16B(), VTMP1.V16B(), VTMP3.V16B());
}

b(&PastConstant);
@@ -104,9 +104,9 @@ DEF_OP(CRC32) {
DEF_OP(PCLMUL) {
auto Op = IROp->C<IR::IROp_PCLMUL>();

auto Dst = GetDst(Node).Q();
auto Src1 = GetSrc(Op->Src1.ID()).V2D();
auto Src2 = GetSrc(Op->Src2.ID()).V2D();
auto Dst = GetVReg(Node).Q();
auto Src1 = GetVReg(Op->Src1.ID()).V2D();
auto Src2 = GetVReg(Op->Src2.ID()).V2D();

switch (Op->Selector) {
case 0b00000000:
