
[RISCV] Split OPERAND_SEW operand type for mask only instructions. #119776

Merged · 3 commits · Dec 13, 2024
Changes from 2 commits
4 changes: 3 additions & 1 deletion llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -346,8 +346,10 @@ enum OperandType : unsigned {
OPERAND_COND_CODE,
// Vector policy operand.
OPERAND_VEC_POLICY,
// Vector SEW operand.
// Vector SEW operand. Stored as log2(SEW).
OPERAND_SEW,
// Special SEW for mask only instructions. Always 0.
OPERAND_SEW_MASK,
// Vector rounding mode for VXRM or FRM.
OPERAND_VEC_RM,
OPERAND_LAST_RISCV_IMM = OPERAND_VEC_RM,
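As context for the new enumerator, a minimal self-contained sketch (not code from the patch) of the log2 encoding that OPERAND_SEW uses and the fixed sentinel that OPERAND_SEW_MASK carries:

#include <cassert>

// Log2 encoding used by OPERAND_SEW: e8 -> 3, e16 -> 4, e32 -> 5, e64 -> 6.
unsigned encodeSEW(unsigned SEW) {
  unsigned Log2 = 0;
  while ((1u << Log2) < SEW)
    ++Log2;
  return Log2;
}

int main() {
  assert(encodeSEW(8) == 3 && encodeSEW(32) == 5);
  // Mask-only instructions (vmand.mm, vmsbf.m, vmset.m, vlm.v, vsm.v) have no
  // meaningful element width; their OPERAND_SEW_MASK operand is always the
  // sentinel 0 rather than an encoded SEW.
  return 0;
}
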
56 changes: 37 additions & 19 deletions llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1664,32 +1664,50 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix \
: RISCV::PseudoVMSGT_VX_##suffix; \
break;
CASE_VMSLT_OPCODES(LMUL_F8, MF8)
CASE_VMSLT_OPCODES(LMUL_F4, MF4)
CASE_VMSLT_OPCODES(LMUL_F2, MF2)
CASE_VMSLT_OPCODES(LMUL_1, M1)
CASE_VMSLT_OPCODES(LMUL_2, M2)
CASE_VMSLT_OPCODES(LMUL_4, M4)
CASE_VMSLT_OPCODES(LMUL_8, M8)
#undef CASE_VMSLT_OPCODES
}
// Mask operations use the LMUL from the mask type.
switch (RISCVTargetLowering::getLMUL(VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b) \
case RISCVII::VLMUL::lmulenum: \
VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b; \
break;
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
CASE_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
CASE_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
CASE_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
CASE_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
CASE_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
CASE_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
CASE_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMNAND_VMSET_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
SDValue VL;
selectVLOp(Node->getOperand(3), VL);

// If vmsge(u) with minimum value, expand it to vmset.
if (IsCmpMinimum) {
ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
ReplaceNode(Node,
CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, MaskSEW));
return;
}

@@ -1708,7 +1726,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
{Cmp, Cmp, VL, SEW}));
{Cmp, Cmp, VL, MaskSEW}));
return;
}
case Intrinsic::riscv_vmsgeu_mask:
@@ -1742,7 +1760,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b) \
#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
case RISCVII::VLMUL::lmulenum: \
VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
: RISCV::PseudoVMSLT_VX_##suffix; \
@@ -1751,13 +1769,13 @@
VMSGTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix##_MASK \
: RISCV::PseudoVMSGT_VX_##suffix##_MASK; \
break;
CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
CASE_VMSLT_OPCODES(LMUL_F8, MF8)
CASE_VMSLT_OPCODES(LMUL_F4, MF4)
CASE_VMSLT_OPCODES(LMUL_F2, MF2)
CASE_VMSLT_OPCODES(LMUL_1, M1)
CASE_VMSLT_OPCODES(LMUL_2, M2)
CASE_VMSLT_OPCODES(LMUL_4, M4)
CASE_VMSLT_OPCODES(LMUL_8, M8)
#undef CASE_VMSLT_OPCODES
}
// Mask operations use the LMUL from the mask type.
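A hypothetical, self-contained model of the operand values the selector attaches after this change: the vmslt/vmsgt compare keeps log2(SEW) of the source elements, while the mask-producing vmset/vmnand get the mask-only value 0. The names below are illustrative, not the LLVM API:

#include <cassert>

// Toy model: SEW operands attached when expanding vmsge(u) into
// vmslt + vmnand, or into vmset when comparing against the minimum value.
struct SelectedSEW {
  unsigned CompareSEW; // log2(element width) of the compared vectors
  unsigned MaskSEW;    // always 0 for the mask ops after this patch
};

SelectedSEW sewOperandsFor(unsigned SrcElementBits) {
  unsigned Log2 = 0;
  while ((1u << Log2) < SrcElementBits)
    ++Log2;
  return {Log2, /*MaskSEW=*/0};
}

int main() {
  // vmsgeu on nxv4i16: the compare would carry 4 (e16), but the vmset that
  // replaces a compare-with-minimum now carries 0, which the updated
  // vmsgeu.ll test prints as an e8 vsetvli.
  SelectedSEW S = sewOperandsFor(16);
  assert(S.CompareSEW == 4 && S.MaskSEW == 0);
  return 0;
}
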
12 changes: 9 additions & 3 deletions llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -430,7 +430,9 @@ void RISCVInstrInfo::copyPhysRegVector(
if (UseVMV) {
const MCInstrDesc &Desc = DefMBBI->getDesc();
MIB.add(DefMBBI->getOperand(RISCVII::getVLOpNum(Desc))); // AVL
MIB.add(DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc))); // SEW
unsigned Log2SEW =
DefMBBI->getOperand(RISCVII::getSEWOpNum(Desc)).getImm();
MIB.addImm(Log2SEW ? Log2SEW : 3); // SEW
MIB.addImm(0); // tu, mu
MIB.addReg(RISCV::VL, RegState::Implicit);
MIB.addReg(RISCV::VTYPE, RegState::Implicit);
@@ -2568,7 +2570,10 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
Ok = (Imm & (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) == Imm;
break;
case RISCVOp::OPERAND_SEW:
Ok = Imm == 0 || (isUInt<5>(Imm) && RISCVVType::isValidSEW(1 << Imm));
Ok = (isUInt<5>(Imm) && RISCVVType::isValidSEW(1 << Imm));
break;
case RISCVOp::OPERAND_SEW_MASK:
Ok = Imm == 0;
break;
case RISCVOp::OPERAND_VEC_RM:
assert(RISCVII::hasRoundModeOp(Desc.TSFlags));
@@ -3206,7 +3211,8 @@ std::string RISCVInstrInfo::createMIROperandComment(
RISCVVType::printVType(Imm, OS);
break;
}
case RISCVOp::OPERAND_SEW: {
case RISCVOp::OPERAND_SEW:
case RISCVOp::OPERAND_SEW_MASK: {
unsigned Log2SEW = Op.getImm();
unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
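Both hunks above now have to tolerate a SEW operand of 0 coming from a mask-only definition. A small illustrative model of the fallbacks they apply (the constants 3 and 8 are taken from the diff; the helper names are made up):

#include <cassert>

// copyPhysRegVector: a forwarded vmv.v.v copy needs a real element width, so
// a mask SEW of 0 is replaced by log2(8) == 3, i.e. e8.
unsigned copySEWOperand(unsigned DefLog2SEW) {
  return DefLog2SEW ? DefLog2SEW : 3;
}

// createMIROperandComment: both operand kinds are printed as an element
// width, with 0 rendered as e8.
unsigned printedSEW(unsigned Log2SEW) {
  return Log2SEW ? 1u << Log2SEW : 8;
}

int main() {
  assert(copySEWOperand(0) == 3 && copySEWOperand(5) == 5);
  assert(printedSEW(0) == 8 && printedSEW(4) == 16);
  return 0;
}
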
42 changes: 26 additions & 16 deletions llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -92,6 +92,11 @@ def sew : RISCVOp {
let OperandType = "OPERAND_SEW";
}

// SEW for mask only instructions like vmand and vmsbf. Should always be 0.
def sew_mask : RISCVOp {
let OperandType = "OPERAND_SEW_MASK";
}

def vec_rm : RISCVOp {
let OperandType = "OPERAND_VEC_RM";
}
@@ -774,9 +779,10 @@ class GetVTypePredicates<VTypeInfo vti> {
}

class VPseudoUSLoadNoMask<VReg RetClass,
int EEW> :
int EEW,
DAGOperand sewop = sew> :
Pseudo<(outs RetClass:$rd),
(ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, sew:$sew,
(ins RetClass:$dest, GPRMem:$rs1, AVL:$vl, sewop:$sew,
vec_policy:$policy), []>,
RISCVVPseudo,
RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/0, !logtwo(EEW), VLMul> {
@@ -922,9 +928,10 @@ class VPseudoILoadMask<VReg RetClass,
}

class VPseudoUSStoreNoMask<VReg StClass,
int EEW> :
int EEW,
DAGOperand sewop = sew> :
Pseudo<(outs),
(ins StClass:$rd, GPRMem:$rs1, AVL:$vl, sew:$sew), []>,
(ins StClass:$rd, GPRMem:$rs1, AVL:$vl, sewop:$sew), []>,
RISCVVPseudo,
RISCVVSE</*Masked*/0, /*Strided*/0, !logtwo(EEW), VLMul> {
let mayLoad = 0;
@@ -1008,7 +1015,7 @@ class VPseudoNullaryMask<VReg RegClass> :
// Nullary for pseudo instructions. They are expanded in
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst> :
Pseudo<(outs VR:$rd), (ins AVL:$vl, sew:$sew), []>,
Pseudo<(outs VR:$rd), (ins AVL:$vl, sew_mask:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@@ -1045,7 +1052,7 @@ class VPseudoUnaryNoMaskNoPolicy<DAGOperand RetClass,
string Constraint = "",
bits<2> TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
(ins OpClass:$rs2, AVL:$vl, sew:$sew), []>,
(ins OpClass:$rs2, AVL:$vl, sew_mask:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@@ -1080,10 +1087,11 @@ class VPseudoUnaryNoMaskRoundingMode<DAGOperand RetClass,
class VPseudoUnaryMask<VReg RetClass,
VReg OpClass,
string Constraint = "",
bits<2> TargetConstraintType = 1> :
bits<2> TargetConstraintType = 1,
DAGOperand sewop = sew> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$passthru, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
VMaskOp:$vm, AVL:$vl, sewop:$sew, vec_policy:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@@ -1138,7 +1146,7 @@ class VPseudoUnaryMask_NoExcept<VReg RetClass,

class VPseudoUnaryNoMaskGPROut :
Pseudo<(outs GPR:$rd),
(ins VR:$rs2, AVL:$vl, sew:$sew), []>,
(ins VR:$rs2, AVL:$vl, sew_mask:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@@ -1149,7 +1157,7 @@ class VPseudoUnaryNoMaskGPROut :

class VPseudoUnaryMaskGPROut :
Pseudo<(outs GPR:$rd),
(ins VR:$rs1, VMaskOp:$vm, AVL:$vl, sew:$sew), []>,
(ins VR:$rs1, VMaskOp:$vm, AVL:$vl, sew_mask:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@@ -1177,9 +1185,10 @@ class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
string Constraint,
bits<2> TargetConstraintType = 1> :
bits<2> TargetConstraintType = 1,
DAGOperand sewop = sew> :
Pseudo<(outs RetClass:$rd),
(ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sew:$sew), []>,
(ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, sewop:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@@ -1852,7 +1861,7 @@ multiclass VPseudoLoadMask {
defvar mx = mti.LMul.MX;
defvar WriteVLDM_MX = !cast<SchedWrite>("WriteVLDM_" # mx);
let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, EEW=1>,
def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, EEW=1, sewop=sew_mask>,
Sched<[WriteVLDM_MX, ReadVLDX]>;
}
}
@@ -1927,7 +1936,7 @@ multiclass VPseudoStoreMask {
defvar mx = mti.LMul.MX;
defvar WriteVSTM_MX = !cast<SchedWrite>("WriteVSTM_" # mx);
let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, EEW=1>,
def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, EEW=1, sewop=sew_mask>,
Sched<[WriteVSTM_MX, ReadVSTX]>;
}
}
@@ -2011,7 +2020,8 @@ multiclass VPseudoVSFS_M {
SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
forcePassthruRead=true>;
let ForceTailAgnostic = true in
def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint,
sewop = sew_mask>,
SchedUnary<"WriteVMSFSV", "ReadVMSFSV", mx,
forcePassthruRead=true>;
}
@@ -2269,7 +2279,7 @@ multiclass VPseudoVALU_MM<bit Commutable = 0> {
foreach m = MxList in {
defvar mx = m.MX;
let VLMul = m.value, isCommutable = Commutable in {
def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "", sewop = sew_mask>,
SchedBinary<"WriteVMALUV", "ReadVMALUV", "ReadVMALUV", mx>;
}
}
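What declaring sew_mask on these pseudos buys downstream is a tighter operand check. A toy, self-contained model of the verifier rule (mirroring the verifyInstruction change above; the enum and bounds here are simplified stand-ins, not the real LLVM definitions):

#include <cassert>

enum OperandType { OPERAND_SEW, OPERAND_SEW_MASK };

// Simplified validity rule: OPERAND_SEW must encode a real element width
// (1 << Imm in 8..64), while OPERAND_SEW_MASK must be exactly 0.
bool isValidSEWOperand(OperandType Ty, unsigned Imm) {
  switch (Ty) {
  case OPERAND_SEW:
    return Imm >= 3 && Imm <= 6;
  case OPERAND_SEW_MASK:
    return Imm == 0;
  }
  return false;
}

int main() {
  assert(isValidSEWOperand(OPERAND_SEW, 5));       // e32 on an ordinary pseudo
  assert(!isValidSEWOperand(OPERAND_SEW, 0));      // 0 is no longer accepted here
  assert(isValidSEWOperand(OPERAND_SEW_MASK, 0));  // vmand.mm, vmsbf.m, vlm.v
  assert(!isValidSEWOperand(OPERAND_SEW_MASK, 3));
  return 0;
}
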
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-to-vmv.mir
@@ -13,12 +13,12 @@ body: |
; CHECK-NEXT: %false:vr = COPY $v8
; CHECK-NEXT: %true:vr = COPY $v9
; CHECK-NEXT: %avl:gprnox0 = COPY $x1
; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 5 /* e32 */
; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 0 /* e8 */
; CHECK-NEXT: $v0 = COPY %mask
%false:vr = COPY $v8
%true:vr = COPY $v9
%avl:gprnox0 = COPY $x1
%mask:vmv0 = PseudoVMSET_M_B8 %avl, 5
%mask:vmv0 = PseudoVMSET_M_B8 %avl, 0
$v0 = COPY %mask
%x:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %false, %true, $v0, %avl, 5
...
@@ -34,14 +34,14 @@ body: |
; CHECK-NEXT: %false:vr = COPY $noreg
; CHECK-NEXT: %true:vr = COPY $v9
; CHECK-NEXT: %avl:gprnox0 = COPY $x1
; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 5 /* e32 */
; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 0 /* e8 */
; CHECK-NEXT: $v0 = COPY %mask
; CHECK-NEXT: %x:vr = PseudoVMV_V_V_M1 %pt, %true, %avl, 5 /* e32 */, 0 /* tu, mu */
%pt:vrnov0 = COPY $v8
%false:vr = COPY $noreg
%true:vr = COPY $v9
%avl:gprnox0 = COPY $x1
%mask:vmv0 = PseudoVMSET_M_B8 %avl, 5
%mask:vmv0 = PseudoVMSET_M_B8 %avl, 0
$v0 = COPY %mask
%x:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, $v0, %avl, 5
...
@@ -57,14 +57,14 @@ body: |
; CHECK-NEXT: %pt:vr = COPY $v8
; CHECK-NEXT: %true:vr = COPY $v9
; CHECK-NEXT: %avl:gprnox0 = COPY $x1
; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 5 /* e32 */
; CHECK-NEXT: %mask:vmv0 = PseudoVMSET_M_B8 %avl, 0 /* e8 */
; CHECK-NEXT: $v0 = COPY %mask
; CHECK-NEXT: %x:vr = PseudoVMV_V_V_M1 %pt, %true, %avl, 5 /* e32 */, 0 /* tu, mu */
%false:vr = COPY $v8
%pt:vrnov0 = COPY $v8
%true:vr = COPY $v9
%avl:gprnox0 = COPY $x1
%mask:vmv0 = PseudoVMSET_M_B8 %avl, 5
%mask:vmv0 = PseudoVMSET_M_B8 %avl, 0
$v0 = COPY %mask
%x:vrnov0 = PseudoVMERGE_VVM_M1 %pt, %false, %true, $v0, %avl, 5
...
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -2183,7 +2183,7 @@ entry:
define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: ret
entry: