diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp
index 69343cf0965b4..fb8bb06a9f706 100644
--- a/src/coreclr/jit/assertionprop.cpp
+++ b/src/coreclr/jit/assertionprop.cpp
@@ -214,6 +214,13 @@ bool IntegralRange::Contains(int64_t value) const
}
case GT_CNS_INT:
+#ifndef TARGET_64BIT
+ if (node->TypeIs(TYP_LONG))
+ {
+ // TODO-CnsLng: delete this zero-diff quirk.
+ break;
+ }
+#endif // !TARGET_64BIT
if (node->IsIntegralConst(0) || node->IsIntegralConst(1))
{
return {SymbolicIntegerValue::Zero, SymbolicIntegerValue::One};
@@ -990,12 +997,12 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
{
if (op1->gtGetOp2()->IsCnsIntOrI())
{
- offset += op1->gtGetOp2()->AsIntCon()->gtIconVal;
+ offset += op1->gtGetOp2()->AsIntCon()->IconValue();
op1 = op1->gtGetOp1()->gtEffectiveVal(/* commaOnly */ true);
}
else if (op1->gtGetOp1()->IsCnsIntOrI())
{
- offset += op1->gtGetOp1()->AsIntCon()->gtIconVal;
+ offset += op1->gtGetOp1()->AsIntCon()->IconValue();
op1 = op1->gtGetOp2()->gtEffectiveVal(/* commaOnly */ true);
}
else
@@ -1114,7 +1121,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
assertion.op2.kind = O2K_CONST_INT;
}
- if (op2->gtOper != GT_CNS_INT)
+ if (!op2->IsCnsIntOrI())
{
goto DONE_ASSERTION; // Don't make an assertion
}
@@ -1129,7 +1136,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
assertion.op1.lcl.lclNum = lclNum;
assertion.op1.vn = optConservativeNormalVN(op1);
assertion.op1.lcl.ssaNum = op1->AsLclVarCommon()->GetSsaNum();
- assertion.op2.u1.iconVal = op2->AsIntCon()->gtIconVal;
+ assertion.op2.u1.iconVal = op2->AsIntCon()->IconValue();
assertion.op2.vn = optConservativeNormalVN(op2);
assertion.op2.SetIconFlag(op2->GetIconHandleFlag());
@@ -1164,16 +1171,18 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
assert(op2->IsIntegralConst(0));
op2Kind = O2K_ZEROOBJ;
}
+#ifndef TARGET_64BIT
+ else if (op1->TypeIs(TYP_LONG))
+ {
+ op2Kind = O2K_CONST_LONG;
+ }
+#endif // !TARGET_64BIT
else
{
op2Kind = O2K_CONST_INT;
}
goto CNS_COMMON;
- case GT_CNS_LNG:
- op2Kind = O2K_CONST_LONG;
- goto CNS_COMMON;
-
case GT_CNS_DBL:
op2Kind = O2K_CONST_DOUBLE;
goto CNS_COMMON;
@@ -1200,9 +1209,9 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
assertion.op2.lconVal = 0;
assertion.op2.vn = optConservativeNormalVN(op2);
- if (op2->gtOper == GT_CNS_INT)
+ if (op2->IsCnsIntOrI())
{
- ssize_t iconVal = op2->AsIntCon()->gtIconVal;
+ ssize_t iconVal = op2->AsIntCon()->IconValue();
if (varTypeIsSmall(lclVar))
{
@@ -1211,7 +1220,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
#ifdef TARGET_ARM
// Do not Constant-Prop large constants for ARM
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had
// target_ssize_t type.
if (!codeGen->validImmForMov((target_ssize_t)iconVal))
{
@@ -1222,9 +1231,9 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
assertion.op2.u1.iconVal = iconVal;
assertion.op2.SetIconFlag(op2->GetIconHandleFlag(), op2->AsIntCon()->gtFieldSeq);
}
- else if (op2->gtOper == GT_CNS_LNG)
+ else if (op2->gtOper == GT_CNS_INT)
{
- assertion.op2.lconVal = op2->AsLngCon()->gtLconVal;
+ assertion.op2.lconVal = op2->AsIntCon()->IntegralValue();
}
else
{
@@ -1462,22 +1471,12 @@ bool Compiler::optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pCon
// Is Local assertion prop?
if (!vnBased)
{
- if (tree->OperGet() == GT_CNS_INT)
+ if (tree->IsCnsIntOrI())
{
*pConstant = tree->AsIntCon()->IconValue();
*pFlags = tree->GetIconHandleFlag();
return true;
}
-#ifdef TARGET_64BIT
- // Just to be clear, get it from gtLconVal rather than
- // overlapping gtIconVal.
- else if (tree->OperGet() == GT_CNS_LNG)
- {
- *pConstant = tree->AsLngCon()->gtLconVal;
- *pFlags = tree->GetIconHandleFlag();
- return true;
- }
-#endif
return false;
}
@@ -2180,13 +2179,13 @@ AssertionInfo Compiler::optAssertionGenJtrue(GenTree* tree)
}
// Look for a call to an IsInstanceOf helper compared to a nullptr
- if ((op2->gtOper != GT_CNS_INT) && (op1->gtOper == GT_CNS_INT))
+ if (!op2->IsCnsIntOrI() && op1->IsCnsIntOrI())
{
std::swap(op1, op2);
}
// Validate op1 and op2
if ((op1->gtOper != GT_CALL) || (op1->AsCall()->gtCallType != CT_HELPER) || (op1->TypeGet() != TYP_REF) || // op1
- (op2->gtOper != GT_CNS_INT) || (op2->AsIntCon()->gtIconVal != 0)) // op2
+ !op2->IsCnsIntOrI() || (op2->AsIntCon()->IconValue() != 0)) // op2
{
return NO_ASSERTION_INDEX;
}
@@ -4040,14 +4039,14 @@ GenTree* Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenT
}
// For Local AssertionProp we only can fold when op2 is a GT_CNS_INT
- if (op2->gtOper != GT_CNS_INT)
+ if (!op2->IsCnsIntOrI())
{
return nullptr;
}
optOp1Kind op1Kind = O1K_LCLVAR;
optOp2Kind op2Kind = O2K_CONST_INT;
- ssize_t cnsVal = op2->AsIntCon()->gtIconVal;
+ ssize_t cnsVal = op2->AsIntCon()->IconValue();
var_types cmpType = op1->TypeGet();
// Don't try to fold/optimize Floating Compares; there are multiple zero values.
@@ -4105,8 +4104,8 @@ GenTree* Compiler::optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenT
foldResult = !foldResult;
}
- op2->AsIntCon()->gtIconVal = foldResult;
- op2->gtType = TYP_INT;
+ op2->gtType = TYP_INT;
+ op2->AsIntCon()->SetIconValue(foldResult);
return optAssertionProp_Update(op2, tree, stmt);
}
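
Reviewer note: the accessor pattern above recurs throughout this change. A minimal sketch of the intended split (ReadIntCon is a hypothetical helper, not part of the patch; IconValue/IntegralValue are the real GenTreeIntCon accessors):

    // IconValue() reads the pointer-sized payload; IntegralValue() reads the
    // full 64-bit value, which is what lets TYP_LONG constants ride on
    // GT_CNS_INT even on 32-bit targets.
    static int64_t ReadIntCon(GenTreeIntCon* con)
    {
        return con->TypeIs(TYP_LONG) ? con->IntegralValue()
                                     : static_cast<int64_t>(con->IconValue());
    }
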
diff --git a/src/coreclr/jit/clrjit.natvis b/src/coreclr/jit/clrjit.natvis
index f56ac98cf6a16..c111ebeb0a093 100644
--- a/src/coreclr/jit/clrjit.natvis
+++ b/src/coreclr/jit/clrjit.natvis
@@ -43,7 +43,7 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
    <DisplayString>{gtTreeID, d}: [{gtOper,en}, {gtType,en}]</DisplayString>
-    <DisplayString>{gtTreeID, d}: [IntCon={((GenTreeIntCon*)this)->gtIconVal, d}]</DisplayString>
+    <DisplayString>{gtTreeID, d}: [IntCon={((GenTreeIntCon*)this)->m_value, d}]</DisplayString>
    <DisplayString>{gtTreeID, d}: [DblCon={((GenTreeDblCon*)this)->gtDconVal, g}]</DisplayString>
@@ -54,9 +54,6 @@ Documentation for VS debugger format specifiers: https://docs.microsoft.com/en-u
    <DisplayString>CNS_VEC</DisplayString>
-  <Type Name="GenTreeLngCon">
-    <DisplayString>{gtTreeID, d}: [LngCon={((GenTreeLngCon*)this)->gtLconVal, l}]</DisplayString>
-  </Type>
    <DisplayString>{gtTreeID, d}: [{((GenTreeCast*)this)->gtCastType,en} &lt;- {((GenTreeUnOp*)this)->gtOp1->gtType,en}]</DisplayString>
    <DisplayString>{gtTreeID, d}: [{((GenTreeHWIntrinsic*)this)->gtHWIntrinsicId,en}, {gtType,en}]</DisplayString>
diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp
index 2015449fc8664..aca28e3abc3b7 100644
--- a/src/coreclr/jit/codegenarm.cpp
+++ b/src/coreclr/jit/codegenarm.cpp
@@ -233,8 +233,8 @@ void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTre
{
// relocatable values tend to come down as a CNS_INT of native int type
// so the line between these two opcodes is kind of blurry
- GenTreeIntConCommon* con = tree->AsIntConCommon();
- ssize_t cnsVal = con->IconValue();
+ GenTreeIntCon* con = tree->AsIntCon();
+ ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
@@ -397,7 +397,7 @@ void CodeGen::genLclHeap(GenTree* tree)
assert(size->isContained());
// If amount is zero then return null in regCnt
- size_t amount = size->AsIntCon()->gtIconVal;
+ size_t amount = size->AsIntCon()->IconValue();
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
@@ -436,7 +436,7 @@ void CodeGen::genLclHeap(GenTree* tree)
if (size->IsCnsIntOrI())
{
// 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
- target_size_t amount = (target_size_t)size->AsIntCon()->gtIconVal;
+ target_size_t amount = (target_size_t)size->AsIntCon()->IconValue();
amount = AlignUp(amount, STACK_ALIGN);
// For small allocations we will generate up to four push instructions (either 2 or 4, exactly,
@@ -937,7 +937,7 @@ void CodeGen::genCodeForShiftLong(GenTree* tree)
assert(shiftBy->isContainedIntOrIImmed());
- unsigned count = (unsigned)shiftBy->AsIntConCommon()->IconValue();
+ unsigned count = (unsigned)shiftBy->AsIntCon()->IconValue();
regNumber regResult = (oper == GT_LSH_HI) ? regHi : regLo;
diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp
index 4d1a2fd36f43f..57869771d58b4 100644
--- a/src/coreclr/jit/codegenarm64.cpp
+++ b/src/coreclr/jit/codegenarm64.cpp
@@ -2649,7 +2649,7 @@ void CodeGen::genCodeForBinary(GenTreeOp* tree)
opt = ShiftOpToInsOpts(op2->gtOper);
emit->emitIns_R_R_R_I(ins, emitActualTypeSize(tree), targetReg, a->GetRegNum(), b->GetRegNum(),
- c->AsIntConCommon()->IconValue(), opt);
+ c->AsIntCon()->IconValue(), opt);
genProduceReg(tree);
return;
@@ -3066,7 +3066,7 @@ void CodeGen::genLclHeap(GenTree* tree)
assert(size->isContained());
// If amount is zero then return null in targetReg
- amount = size->AsIntCon()->gtIconVal;
+ amount = size->AsIntCon()->IconValue();
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
@@ -3423,7 +3423,7 @@ void CodeGen::genCodeForNegNot(GenTree* tree)
GenTree* b = op1->gtGetOp2();
genConsumeRegs(op1);
GetEmitter()->emitIns_R_R_I(ins, emitActualTypeSize(tree), targetReg, a->GetRegNum(),
- b->AsIntConCommon()->IntegralValue(), ShiftOpToInsOpts(oper));
+ b->AsIntCon()->IntegralValue(), ShiftOpToInsOpts(oper));
}
break;
@@ -3892,7 +3892,7 @@ void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
- genInstrWithConstant(INS_add, dataSize, storeDataReg, loadReg, data->AsIntConCommon()->IconValue(),
+ genInstrWithConstant(INS_add, dataSize, storeDataReg, loadReg, data->AsIntCon()->IconValue(),
REG_NA);
}
else
@@ -4021,7 +4021,7 @@ void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* treeNode)
else
{
GetEmitter()->emitIns_R_I(INS_cmp, emitActualTypeSize(treeNode), targetReg,
- comparand->AsIntConCommon()->IconValue());
+ comparand->AsIntCon()->IconValue());
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
}
@@ -4559,7 +4559,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
if (op2->isContainedIntOrIImmed())
{
- GenTreeIntConCommon* intConst = op2->AsIntConCommon();
+ GenTreeIntCon* intConst = op2->AsIntCon();
regNumber op1Reg = op1->GetRegNum();
@@ -4599,8 +4599,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
assert(shiftOp2->isContained());
emit->emitIns_R_R_I(ins, cmpSize, op1->GetRegNum(), shiftOp1->GetRegNum(),
- shiftOp2->AsIntConCommon()->IntegralValue(),
- ShiftOpToInsOpts(oper));
+ shiftOp2->AsIntCon()->IntegralValue(), ShiftOpToInsOpts(oper));
}
break;
@@ -4621,7 +4620,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
assert(op2->gtGetOp2()->isContained());
emit->emitIns_R_R_I(ins, cmpSize, op1->GetRegNum(), op2->gtGetOp1()->GetRegNum(),
- op2->gtGetOp2()->AsIntConCommon()->IntegralValue(), ShiftOpToInsOpts(oper));
+ op2->gtGetOp2()->AsIntCon()->IntegralValue(), ShiftOpToInsOpts(oper));
break;
default:
@@ -4687,7 +4686,7 @@ void CodeGen::genCodeForCCMP(GenTreeCCMP* ccmp)
if (op2->isContainedIntOrIImmed())
{
- GenTreeIntConCommon* intConst = op2->AsIntConCommon();
+ GenTreeIntCon* intConst = op2->AsIntCon();
emit->emitIns_R_I_FLAGS_COND(INS_ccmp, cmpSize, srcReg1, (int)intConst->IconValue(), ccmp->gtFlagsVal, insCond);
}
else
@@ -4865,12 +4864,11 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree)
if (tree->OperIs(GT_JTEST))
{
- ssize_t compareImm = op2->AsIntCon()->IconValue();
-
- assert(isPow2(((size_t)compareImm)));
+ uint64_t compareImm = op2->AsIntCon()->IntegralValueUnsigned();
+ assert(isPow2(compareImm));
instruction ins = (cc.GetCode() == GenCondition::EQ) ? INS_tbz : INS_tbnz;
- int imm = genLog2((size_t)compareImm);
+ int imm = genLog2(compareImm);
GetEmitter()->emitIns_J_R_I(ins, attr, compiler->compCurBB->GetJumpDest(), reg, imm);
}
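
Reviewer note: a worked example of the retyped GT_JTEST immediate (values hypothetical; isPow2 and genLog2 are the real JIT utilities):

    // Reading the mask as unsigned avoids the (size_t) casts the old code
    // needed to keep a bit-63 mask well-defined.
    uint64_t compareImm = 0x8000000000000000ULL; // single set bit
    assert(isPow2(compareImm));
    int imm = genLog2(compareImm); // 63, the tbz/tbnz bit number
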
diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp
index fd0b2de289b61..7eb21e6b66180 100644
--- a/src/coreclr/jit/codegenarmarch.cpp
+++ b/src/coreclr/jit/codegenarmarch.cpp
@@ -857,8 +857,8 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
if (source->isContained())
{
#ifdef TARGET_ARM64
- assert(source->OperGet() == GT_CNS_INT);
- assert(source->AsIntConCommon()->IconValue() == 0);
+ assert(source->IsCnsIntOrI());
+ assert(source->AsIntCon()->IconValue() == 0);
emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut);
#else // !TARGET_ARM64
@@ -1580,7 +1580,7 @@ void CodeGen::genCodeForShift(GenTree* tree)
else
{
unsigned immWidth = emitter::getBitWidth(size); // For ARM64, immWidth will be set to 32 or 64
- unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal & (immWidth - 1);
+ unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->IconValue() & (immWidth - 1);
GetEmitter()->emitIns_R_R_I(ins, size, dstReg, operand->GetRegNum(), shiftByImm);
}
@@ -4699,7 +4699,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
// Handle LEA with "contained" BFIZ
assert(scale == 0);
- scale = (DWORD)index->gtGetOp2()->AsIntConCommon()->IconValue();
+ scale = (DWORD)index->gtGetOp2()->AsIntCon()->IconValue();
index = index->gtGetOp1()->gtGetOp1();
}
else if (index->OperIs(GT_CAST))
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 83270d95b2564..9587ddcee52e9 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -1113,10 +1113,10 @@ bool CodeGen::genCreateAddrMode(
/* Check for an addition of a constant */
- if (op2->IsIntCnsFitsInI32() && (op2->gtType != TYP_REF) && FitsIn<INT32>(cns + op2->AsIntConCommon()->IconValue()))
+ if (op2->IsIntCnsFitsInI32() && (op2->gtType != TYP_REF) && FitsIn<INT32>(cns + op2->AsIntCon()->IconValue()))
{
// We should not be building address modes out of non-foldable constants
- if (!op2->AsIntConCommon()->ImmedValCanBeFolded(compiler, addr->OperGet()))
+ if (!op2->AsIntCon()->ImmedValCanBeFolded(compiler, addr->OperGet()))
{
assert(compiler->opts.compReloc);
return false;
@@ -1124,7 +1124,7 @@ bool CodeGen::genCreateAddrMode(
/* We're adding a constant */
- cns += op2->AsIntConCommon()->IconValue();
+ cns += op2->AsIntCon()->IconValue();
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
if (cns == 0)
@@ -1386,7 +1386,7 @@ bool CodeGen::genCreateAddrMode(
}
else if (index->IsIntCnsFitsInI32())
{
- ssize_t constantIndex = index->AsIntConCommon()->IconValue() * indexScale;
+ ssize_t constantIndex = index->AsIntCon()->IconValue() * indexScale;
if (constantIndex == 0)
{
// while scale is a non-zero constant, the actual index is zero so drop it
@@ -7471,7 +7471,7 @@ const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsi
//
GenTreeIntCon CodeGen::intForm(var_types type, ssize_t value)
{
- GenTreeIntCon i(type, value);
+ GenTreeIntCon i(type, value, nullptr);
i.SetRegNum(REG_NA);
return i;
}
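
Reviewer note: the FitsIn<INT32> guard above is load-bearing because two in-range constants can sum out of range. A hedged sketch with hypothetical values:

    // Each addend fits a signed 32-bit displacement; their sum does not, so
    // genCreateAddrMode must refuse to fold the second constant.
    int64_t cns = 0x7FFFFFF0; // displacement accumulated so far
    int64_t imm = 0x20;       // the would-be folded IconValue()
    assert(!FitsIn<INT32>(cns + imm));
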
diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp
index 44edc3fae0fee..a7e1fee467e3c 100644
--- a/src/coreclr/jit/codegenloongarch64.cpp
+++ b/src/coreclr/jit/codegenloongarch64.cpp
@@ -1936,7 +1936,7 @@ void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
}
else if (data->IsIntegralConst())
{
- ssize_t imm = data->AsIntConCommon()->IconValue();
+ ssize_t imm = data->AsIntCon()->IconValue();
emit->emitIns_I_la(EA_PTRSIZE, REG_R21, imm);
dataReg = REG_R21;
}
@@ -2083,7 +2083,7 @@ void CodeGen::genLclHeap(GenTree* tree)
assert(size->isContained());
// If amount is zero then return null in targetReg
- amount = size->AsIntCon()->gtIconVal;
+ amount = size->AsIntCon()->IconValue();
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
@@ -2472,7 +2472,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree)
// Check divisorOp first as we can always allow it to be a contained immediate
if (divisorOp->isContainedIntOrIImmed())
{
- ssize_t intConst = (int)(divisorOp->AsIntCon()->gtIconVal);
+ ssize_t intConst = (int)(divisorOp->AsIntCon()->IconValue());
divisorReg = emitter::isGeneralRegister(divisorReg) ? divisorReg : REG_R21;
emit->emitIns_I_la(EA_PTRSIZE, divisorReg, intConst);
}
@@ -3939,7 +3939,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
if (op2->isContainedIntOrIImmed())
{
- ssize_t imm = op2->AsIntCon()->gtIconVal;
+ ssize_t imm = op2->AsIntCon()->IconValue();
switch (cmpSize)
{
@@ -4178,7 +4178,7 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree)
if (op2->isContainedIntOrIImmed())
{
- ssize_t imm = op2->AsIntCon()->gtIconVal;
+ ssize_t imm = op2->AsIntCon()->IconValue();
if (imm)
{
@@ -4730,7 +4730,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
- assert((treeNode->OperGet() == GT_CNS_INT) || (treeNode->OperGet() == GT_CNS_DBL));
+ assert((treeNode->IsCnsIntOrI()) || (treeNode->OperGet() == GT_CNS_DBL));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
@@ -5280,8 +5280,8 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
// If it is contained then source must be the integer constant zero
if (source->isContained())
{
- assert(source->OperGet() == GT_CNS_INT);
- assert(source->AsIntConCommon()->IconValue() == 0);
+ assert(source->IsCnsIntOrI());
+ assert(source->AsIntCon()->IconValue() == 0);
emit->emitIns_S_R(storeIns, storeAttr, REG_R0, varNumOut, argOffsetOut);
}
@@ -5758,8 +5758,8 @@ void CodeGen::genRangeCheck(GenTree* oper)
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
- emitter* emit = GetEmitter();
- GenTreeIntConCommon* intConst = nullptr;
+ emitter* emit = GetEmitter();
+ GenTreeIntCon* intConst = nullptr;
if (arrIndex->isContainedIntOrIImmed())
{
src1 = arrLen;
@@ -5767,7 +5767,7 @@ void CodeGen::genRangeCheck(GenTree* oper)
reg1 = REG_R21;
reg2 = src1->GetRegNum();
- intConst = src2->AsIntConCommon();
+ intConst = src2->AsIntCon();
ssize_t imm = intConst->IconValue();
if (imm == INT64_MAX)
{
@@ -5788,7 +5788,7 @@ void CodeGen::genRangeCheck(GenTree* oper)
if (src2->isContainedIntOrIImmed())
{
reg2 = REG_R21;
- ssize_t imm = src2->AsIntConCommon()->IconValue();
+ ssize_t imm = src2->AsIntCon()->IconValue();
emit->emitIns_I_la(EA_PTRSIZE, REG_R21, imm);
}
else
@@ -5883,7 +5883,7 @@ void CodeGen::genCodeForShift(GenTree* tree)
}
else
{
- unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal;
+ unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->IconValue();
// should check shiftByImm for loongarch32-ins.
unsigned immWidth = emitter::getBitWidth(size); // For LOONGARCH64, immWidth will be set to 32 or 64
diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp
index b5d2ce8387872..ac3a5fc69f93c 100644
--- a/src/coreclr/jit/codegenriscv64.cpp
+++ b/src/coreclr/jit/codegenriscv64.cpp
@@ -1578,7 +1578,7 @@ void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
}
else if (data->IsIntegralConst())
{
- ssize_t imm = data->AsIntConCommon()->IconValue();
+ ssize_t imm = data->AsIntCon()->IconValue();
emit->emitLoadImmediate(EA_PTRSIZE, rsGetRsvdReg(), imm);
dataReg = rsGetRsvdReg();
}
@@ -1712,7 +1712,7 @@ void CodeGen::genLclHeap(GenTree* tree)
assert(size->isContained());
// If amount is zero then return null in targetReg
- amount = size->AsIntCon()->gtIconVal;
+ amount = size->AsIntCon()->IconValue();
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
@@ -2100,7 +2100,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree)
assert(typeSize >= genTypeSize(genActualType(src1->TypeGet())) &&
typeSize >= genTypeSize(genActualType(divisorOp->TypeGet())));
- // ssize_t intConstValue = divisorOp->AsIntCon()->gtIconVal;
+ // ssize_t intConstValue = divisorOp->AsIntCon()->IconValue();
regNumber reg1 = src1->GetRegNum();
regNumber divisorReg = divisorOp->GetRegNum();
instruction ins;
@@ -2108,7 +2108,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree)
// Check divisorOp first as we can always allow it to be a contained immediate
if (divisorOp->isContainedIntOrIImmed())
{
- ssize_t intConst = (int)(divisorOp->AsIntCon()->gtIconVal);
+ ssize_t intConst = (int)(divisorOp->AsIntCon()->IconValue());
divisorReg = rsGetRsvdReg();
emit->emitLoadImmediate(EA_PTRSIZE, divisorReg, intConst);
}
@@ -2122,7 +2122,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree)
if (src1->isContainedIntOrIImmed())
{
assert(!divisorOp->isContainedIntOrIImmed());
- ssize_t intConst = (int)(src1->AsIntCon()->gtIconVal);
+ ssize_t intConst = (int)(src1->AsIntCon()->IconValue());
reg1 = rsGetRsvdReg();
emit->emitLoadImmediate(EA_PTRSIZE, reg1, intConst);
}
@@ -2147,7 +2147,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree)
//
if (divisorOp->IsCnsIntOrI())
{
- ssize_t intConstValue = divisorOp->AsIntCon()->gtIconVal;
+ ssize_t intConstValue = divisorOp->AsIntCon()->IconValue();
// assert(intConstValue != 0); // already checked above by IsIntegralConst(0)
if (intConstValue != -1)
{
@@ -3579,7 +3579,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree)
if (op2->isContainedIntOrIImmed())
{
- ssize_t imm = op2->AsIntCon()->gtIconVal;
+ ssize_t imm = op2->AsIntCon()->IconValue();
switch (cmpSize)
{
@@ -3815,7 +3815,7 @@ void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree)
if (op2->isContainedIntOrIImmed())
{
- ssize_t imm = op2->AsIntCon()->gtIconVal;
+ ssize_t imm = op2->AsIntCon()->IconValue();
if (imm)
{
assert(regOp1 != REG_R0);
@@ -4511,7 +4511,7 @@ void CodeGen::genCodeForTreeNode(GenTree* treeNode)
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
- assert((treeNode->OperGet() == GT_CNS_INT) || (treeNode->OperGet() == GT_CNS_DBL));
+ assert((treeNode->IsCnsIntOrI()) || (treeNode->OperGet() == GT_CNS_DBL));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
@@ -5046,8 +5046,8 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
// If it is contained then source must be the integer constant zero
if (source->isContained())
{
- assert(source->OperGet() == GT_CNS_INT);
- assert(source->AsIntConCommon()->IconValue() == 0);
+ assert(source->IsCnsIntOrI());
+ assert(source->AsIntCon()->IconValue() == 0);
emit->emitIns_S_R(storeIns, storeAttr, REG_R0, varNumOut, argOffsetOut);
}
else
@@ -5561,7 +5561,7 @@ void CodeGen::genCodeForShift(GenTree* tree)
}
else
{
- unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal;
+ unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->IconValue();
if (shiftByImm >= 32 && shiftByImm < 64)
{
immWidth = 64;
@@ -5591,7 +5591,7 @@ void CodeGen::genCodeForShift(GenTree* tree)
else
{
instruction ins = genGetInsForOper(tree);
- unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal;
+ unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->IconValue();
// should check shiftByImm for riscv64-ins.
unsigned immWidth = emitter::getBitWidth(size); // For RISCV64, immWidth will be set to 32 or 64
diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp
index 3fa633913fd4b..85fb8d6f606b0 100644
--- a/src/coreclr/jit/codegenxarch.cpp
+++ b/src/coreclr/jit/codegenxarch.cpp
@@ -843,10 +843,10 @@ void CodeGen::genCodeForLongUMod(GenTreeOp* node)
assert(dividendHi->isUsedFromReg());
GenTree* const divisor = node->gtOp2;
- assert(divisor->gtSkipReloadOrCopy()->OperGet() == GT_CNS_INT);
+ assert(divisor->gtSkipReloadOrCopy()->IsCnsIntOrI());
assert(divisor->gtSkipReloadOrCopy()->isUsedFromReg());
- assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal >= 2);
- assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->gtIconVal <= 0x3fffffff);
+ assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->IconValue() >= 2);
+ assert(divisor->gtSkipReloadOrCopy()->AsIntCon()->IconValue() <= 0x3fffffff);
// dividendLo must be in RAX; dividendHi must be in RDX
genCopyRegIfNeeded(dividendLo, REG_EAX);
@@ -937,8 +937,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
genCopyRegIfNeeded(dividend, REG_RAX);
// zero or sign extend rax to rdx
- if (oper == GT_UMOD || oper == GT_UDIV ||
- (dividend->IsIntegralConst() && (dividend->AsIntConCommon()->IconValue() > 0)))
+ if (oper == GT_UMOD || oper == GT_UDIV || (dividend->IsIntegralConst() && (dividend->AsIntCon()->IconValue() > 0)))
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, REG_EDX);
}
@@ -1094,8 +1093,7 @@ void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
if (op2->isContainedIntOrIImmed())
{
- emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg,
- (int)op2->AsIntConCommon()->IconValue());
+ emit->emitIns_R_AR(INS_lea, emitTypeSize(treeNode), targetReg, op1reg, (int)op2->AsIntCon()->IconValue());
}
else
{
@@ -1207,7 +1205,7 @@ void CodeGen::genCodeForMul(GenTreeOp* treeNode)
if (immOp != nullptr)
{
- ssize_t imm = immOp->AsIntConCommon()->IconValue();
+ ssize_t imm = immOp->AsIntCon()->IconValue();
if (!requiresOverflowCheck && rmOp->isUsedFromReg() && ((imm == 3) || (imm == 5) || (imm == 9)))
{
@@ -2790,7 +2788,7 @@ void CodeGen::genLclHeap(GenTree* tree)
size_t amount = 0;
if (size->IsCnsIntOrI() && size->isContained())
{
- amount = size->AsIntCon()->gtIconVal;
+ amount = size->AsIntCon()->IconValue();
assert((amount > 0) && (amount <= UINT_MAX));
// 'amount' is the total number of bytes to localloc to properly STACK_ALIGN
@@ -4682,7 +4680,7 @@ void CodeGen::genCodeForShift(GenTree* tree)
}
else
{
- int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
+ int shiftByValue = (int)shiftBy->AsIntCon()->IconValue();
#if defined(TARGET_64BIT)
// Try to emit rorx if BMI2 is available instead of mov+rol
@@ -4792,7 +4790,7 @@ void CodeGen::genCodeForShiftLong(GenTree* tree)
assert(shiftBy->isContainedIntOrIImmed());
- unsigned int count = (unsigned int)shiftBy->AsIntConCommon()->IconValue();
+ unsigned int count = (unsigned int)shiftBy->AsIntCon()->IconValue();
if (oper == GT_LSH_HI)
{
@@ -4895,7 +4893,7 @@ void CodeGen::genCodeForShiftRMW(GenTreeStoreInd* storeInd)
GenTree* shiftBy = data->AsOp()->gtOp2;
if (shiftBy->isContainedIntOrIImmed())
{
- int shiftByValue = (int)shiftBy->AsIntConCommon()->IconValue();
+ int shiftByValue = (int)shiftBy->AsIntCon()->IconValue();
ins = genMapShiftInsToShiftByConstantIns(ins, shiftByValue);
if (shiftByValue == 1)
{
@@ -5310,10 +5308,10 @@ void CodeGen::genCodeForIndir(GenTreeIndir* tree)
noway_assert(EA_ATTR(genTypeSize(targetType)) == EA_PTRSIZE);
#if TARGET_64BIT
emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->GetRegNum(), FLD_GLOBAL_GS,
- (int)addr->AsIntCon()->gtIconVal);
+ (int)addr->AsIntCon()->IconValue());
#else
emit->emitIns_R_C(ins_Load(TYP_I_IMPL), EA_PTRSIZE, tree->GetRegNum(), FLD_GLOBAL_FS,
- (int)addr->AsIntCon()->gtIconVal);
+ (int)addr->AsIntCon()->IconValue());
#endif
}
else
@@ -5544,7 +5542,7 @@ void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
ssize_t ival = op2->IconValue();
assert((ival >= 0) && (ival <= 255));
- op2->gtIconVal = static_cast<uint8_t>(ival);
+ op2->SetIconValue(static_cast<uint8_t>(ival));
break;
}
@@ -6210,13 +6208,13 @@ void CodeGen::genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackA
{
// Note that if gtControlExpr is an indir of an absolute address, we mark it as
// contained only if it can be encoded as PC-relative offset.
- assert(target->AsIndir()->Base()->AsIntConCommon()->FitsInAddrBase(compiler));
+ assert(target->AsIndir()->Base()->AsIntCon()->FitsInAddrBase(compiler));
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN_INDIR,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
- (void*) target->AsIndir()->Base()->AsIntConCommon()->IconValue()
+ (void*) target->AsIndir()->Base()->AsIntCon()->IconValue()
X86_ARG(argSizeForEmitter),
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
@@ -8512,7 +8510,7 @@ void CodeGen::genPutArgStk(GenTreePutArgStk* putArgStk)
if (data->isContainedIntOrIImmed())
{
GetEmitter()->emitIns_S_I(ins_Store(targetType), emitTypeSize(targetType), baseVarNum, argOffset,
- (int)data->AsIntConCommon()->IconValue());
+ (int)data->AsIntCon()->IconValue());
}
else
{
@@ -11262,7 +11260,7 @@ bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsPCRelOffset(size_t addr)
//
bool CodeGenInterface::genCodeIndirAddrCanBeEncodedAsZeroRelOffset(size_t addr)
{
- return GenTreeIntConCommon::FitsInI32((ssize_t)addr);
+ return FitsIn<int32_t>((ssize_t)addr);
}
// Return true if an absolute indirect code address needs a relocation recorded with VM.
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 44437870ae655..5325e5d828887 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -11186,7 +11186,6 @@ class GenTreeVisitor
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
- case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_CNS_VEC:
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 47c9007ddee4f..9215062788e92 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -1797,15 +1797,6 @@ inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_LARGE);
assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE));
-#if defined(HOST_64BIT) && !defined(TARGET_64BIT)
- if (gtOper == GT_CNS_LNG && oper == GT_CNS_INT)
- {
- // When casting from LONG to INT, we need to force cast of the value,
- // if the host architecture represents INT and LONG with the same data size.
- AsLngCon()->gtLconVal = (INT64)(INT32)AsLngCon()->gtLconVal;
- }
-#endif // defined(HOST_64BIT) && !defined(TARGET_64BIT)
-
SetOperRaw(oper);
#ifdef DEBUG
@@ -1897,7 +1888,7 @@ inline void GenTree::ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate)
// BashToConst: Bash the node to a constant one.
//
// The function will infer the node's new oper from the type: GT_CNS_INT
-// or GT_CNS_LNG for integers and GC types, GT_CNS_DBL for floats/doubles.
+// for integers and GC types, GT_CNS_DBL for floats/doubles.
//
// The type is inferred from "value"'s type ("T") unless an explicit
// one is provided via the second argument, in which case it is checked
@@ -1944,7 +1935,7 @@ void GenTree::BashToConst(T value, var_types type /* = TYP_UNDEF */)
}
else
{
- oper = (type == TYP_LONG) ? GT_CNS_NATIVELONG : GT_CNS_INT;
+ oper = GT_CNS_INT;
}
SetOper(oper);
@@ -1954,26 +1945,16 @@ void GenTree::BashToConst(T value, var_types type /* = TYP_UNDEF */)
switch (oper)
{
case GT_CNS_INT:
-#if !defined(TARGET_64BIT)
- assert(type != TYP_LONG);
-#endif
assert(varTypeIsIntegral(type) || varTypeIsGC(type));
if (genTypeSize(type) <= genTypeSize(TYP_INT))
{
assert(FitsIn<int32_t>(value));
}
- AsIntCon()->SetIconValue(static_cast<ssize_t>(value));
+ AsIntCon()->SetIntegralValue(static_cast<int64_t>(value));
AsIntCon()->gtFieldSeq = nullptr;
break;
-#if !defined(TARGET_64BIT)
- case GT_CNS_LNG:
- assert(type == TYP_LONG);
- AsLngCon()->SetLngValue(static_cast<int64_t>(value));
- break;
-#endif
-
case GT_CNS_DBL:
assert(varTypeIsFloating(type));
AsDblCon()->SetDconValue(static_cast<double>(value));
@@ -3711,7 +3692,7 @@ inline void Compiler::LoopDsc::VERIFY_lpIterTree() const
assert(value->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_RSH, GT_LSH));
assert(value->AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
assert(value->AsOp()->gtOp1->AsLclVar()->GetLclNum() == lpIterTree->AsLclVar()->GetLclNum());
- assert(value->AsOp()->gtOp2->OperGet() == GT_CNS_INT);
+ assert(value->AsOp()->gtOp2->IsCnsIntOrI());
#endif
}
@@ -3729,7 +3710,7 @@ inline int Compiler::LoopDsc::lpIterConst() const
{
VERIFY_lpIterTree();
GenTree* value = lpIterTree->AsLclVar()->Data();
- return (int)value->AsOp()->gtOp2->AsIntCon()->gtIconVal;
+ return (int)value->AsOp()->gtOp2->AsIntCon()->IconValue();
}
//-----------------------------------------------------------------------------
@@ -3860,7 +3841,7 @@ inline int Compiler::LoopDsc::lpConstLimit() const
GenTree* limit = lpLimit();
assert(limit->OperIsConst());
- return (int)limit->AsIntCon()->gtIconVal;
+ return (int)limit->AsIntCon()->IconValue();
}
//-----------------------------------------------------------------------------
@@ -4444,7 +4425,6 @@ void GenTree::VisitOperands(TVisitor visitor)
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
- case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_CNS_VEC:
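
Reviewer note: usage sketch of the new BashToConst contract (node is a hypothetical GenTree*; the asserts mirror what the rewritten switch guarantees):

    // A long constant now bashes to GT_CNS_INT of TYP_LONG on all targets,
    // with the full payload stored via SetIntegralValue.
    node->BashToConst(INT64(0x1122334455667788), TYP_LONG);
    assert(node->OperIs(GT_CNS_INT) && node->TypeIs(TYP_LONG));
    assert(node->AsIntCon()->IntegralValue() == 0x1122334455667788);
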
diff --git a/src/coreclr/jit/decomposelongs.cpp b/src/coreclr/jit/decomposelongs.cpp
index c7b28b15a5c63..771d1ab3c29d6 100644
--- a/src/coreclr/jit/decomposelongs.cpp
+++ b/src/coreclr/jit/decomposelongs.cpp
@@ -179,7 +179,7 @@ GenTree* DecomposeLongs::DecomposeNode(GenTree* tree)
nextNode = DecomposeCast(use);
break;
- case GT_CNS_LNG:
+ case GT_CNS_INT:
nextNode = DecomposeCnsLng(use);
break;
@@ -656,7 +656,7 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use)
}
//------------------------------------------------------------------------
-// DecomposeCnsLng: Decompose GT_CNS_LNG.
+// DecomposeCnsLng: Decompose GT_CNS_INT.
//
// Arguments:
// use - the LIR::Use object for the def that needs to be decomposed.
@@ -667,11 +667,11 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use)
GenTree* DecomposeLongs::DecomposeCnsLng(LIR::Use& use)
{
assert(use.IsInitialized());
- assert(use.Def()->OperGet() == GT_CNS_LNG);
+ assert(use.Def()->OperGet() == GT_CNS_INT);
GenTree* tree = use.Def();
- INT32 loVal = tree->AsLngCon()->LoVal();
- INT32 hiVal = tree->AsLngCon()->HiVal();
+ INT32 loVal = tree->AsIntCon()->LoVal();
+ INT32 hiVal = tree->AsIntCon()->HiVal();
GenTree* loResult = tree;
loResult->BashToConst(loVal);
@@ -1059,7 +1059,7 @@ GenTree* DecomposeLongs::DecomposeShift(LIR::Use& use)
{
// Reduce count modulo 64 to match behavior found in the shift helpers,
// Compiler::gtFoldExpr and ValueNumStore::EvalOpIntegral.
- unsigned int count = shiftByOp->AsIntCon()->gtIconVal & 0x3F;
+ unsigned int count = shiftByOp->AsIntCon()->IconValue() & 0x3F;
Range().Remove(shiftByOp);
if (count == 0)
@@ -1416,7 +1416,7 @@ GenTree* DecomposeLongs::DecomposeRotate(LIR::Use& use)
oper = GT_RSH_LO;
}
- unsigned count = (unsigned)rotateByOp->AsIntCon()->gtIconVal;
+ unsigned count = (unsigned)rotateByOp->AsIntCon()->IconValue();
Range().Remove(rotateByOp);
// Make sure the rotate amount is between 0 and 63.
@@ -1657,10 +1657,10 @@ GenTree* DecomposeLongs::DecomposeUMod(LIR::Use& use)
GenTree* loOp2 = op2->gtGetOp1();
GenTree* hiOp2 = op2->gtGetOp2();
- assert(loOp2->OperGet() == GT_CNS_INT);
- assert(hiOp2->OperGet() == GT_CNS_INT);
- assert((loOp2->AsIntCon()->gtIconVal >= 2) && (loOp2->AsIntCon()->gtIconVal <= 0x3fffffff));
- assert(hiOp2->AsIntCon()->gtIconVal == 0);
+ assert(loOp2->IsCnsIntOrI());
+ assert(hiOp2->IsCnsIntOrI());
+ assert((loOp2->AsIntCon()->IconValue() >= 2) && (loOp2->AsIntCon()->IconValue() <= 0x3fffffff));
+ assert(hiOp2->AsIntCon()->IconValue() == 0);
// Get rid of op2's hi part. We don't need it.
Range().Remove(hiOp2);
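
Reviewer note: the LoVal/HiVal split performed by the decomposition, as a worked example (value hypothetical):

    int64_t value = 0x0000000200000001LL;          // a TYP_LONG constant
    int32_t loVal = (int32_t)(value & 0xFFFFFFFF); // 1, the low GT_CNS_INT
    int32_t hiVal = (int32_t)(value >> 32);        // 2, the high GT_CNS_INT
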
diff --git a/src/coreclr/jit/earlyprop.cpp b/src/coreclr/jit/earlyprop.cpp
index 1bbf3ec95700d..2b0c95bff9372 100644
--- a/src/coreclr/jit/earlyprop.cpp
+++ b/src/coreclr/jit/earlyprop.cpp
@@ -517,7 +517,7 @@ GenTree* Compiler::optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckT
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->IsCnsIntOrI())
{
- offsetValue += addr->gtGetOp2()->AsIntConCommon()->IconValue();
+ offsetValue += addr->gtGetOp2()->AsIntCon()->IconValue();
addr = addr->gtGetOp1();
}
@@ -601,7 +601,7 @@ GenTree* Compiler::optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckT
(additionOp1->AsLclVarCommon()->GetLclNum() == nullCheckAddress->AsLclVarCommon()->GetLclNum()) &&
(additionOp2->IsCnsIntOrI()))
{
- offsetValue += additionOp2->AsIntConCommon()->IconValue();
+ offsetValue += additionOp2->AsIntCon()->IconValue();
nullCheckTree = commaOp1EffectiveValue;
}
}
diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp
index 33ae40ee208ef..622f353c82e28 100644
--- a/src/coreclr/jit/emitarm.cpp
+++ b/src/coreclr/jit/emitarm.cpp
@@ -8073,10 +8073,10 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
assert(!src->isContained() || src->isContainedIntOrIImmed());
// find immed (if any) - it cannot be a dst
- GenTreeIntConCommon* intConst = nullptr;
+ GenTreeIntCon* intConst = nullptr;
if (src->isContainedIntOrIImmed())
{
- intConst = src->AsIntConCommon();
+ intConst = src->AsIntCon();
}
if (intConst)
@@ -8098,8 +8098,8 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// find immed (if any) - it cannot be a dst
// Only one src can be an int.
- GenTreeIntConCommon* intConst = nullptr;
- GenTree* nonIntReg = nullptr;
+ GenTreeIntCon* intConst = nullptr;
+ GenTree* nonIntReg = nullptr;
if (varTypeIsFloating(dst))
{
@@ -8116,7 +8116,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// Check src2 first as we can always allow it to be a contained immediate
if (src2->isContainedIntOrIImmed())
{
- intConst = src2->AsIntConCommon();
+ intConst = src2->AsIntCon();
nonIntReg = src1;
}
// Only for commutative operations do we check src1 and allow it to be a contained immediate
@@ -8129,7 +8129,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
if (src1->isContainedIntOrIImmed())
{
assert(!src2->isContainedIntOrIImmed());
- intConst = src1->AsIntConCommon();
+ intConst = src1->AsIntCon();
nonIntReg = src2;
}
}
diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp
index 5e0b4f2e78a95..9bb85230ba5b5 100644
--- a/src/coreclr/jit/emitarm64.cpp
+++ b/src/coreclr/jit/emitarm64.cpp
@@ -14220,10 +14220,10 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
assert(!src->isContained() || src->isContainedIntOrIImmed());
// find immed (if any) - it cannot be a dst
- GenTreeIntConCommon* intConst = nullptr;
+ GenTreeIntCon* intConst = nullptr;
if (src->isContainedIntOrIImmed())
{
- intConst = src->AsIntConCommon();
+ intConst = src->AsIntCon();
}
if (intConst)
@@ -14248,8 +14248,8 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// find immed (if any) - it cannot be a dst
// Only one src can be an int.
- GenTreeIntConCommon* intConst = nullptr;
- GenTree* nonIntReg = nullptr;
+ GenTreeIntCon* intConst = nullptr;
+ GenTree* nonIntReg = nullptr;
if (varTypeIsFloating(dst))
{
@@ -14266,7 +14266,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// Check src2 first as we can always allow it to be a contained immediate
if (src2->isContainedIntOrIImmed())
{
- intConst = src2->AsIntConCommon();
+ intConst = src2->AsIntCon();
nonIntReg = src1;
}
// Only for commutative operations do we check src1 and allow it to be a contained immediate
@@ -14279,7 +14279,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
if (src1->isContainedIntOrIImmed())
{
assert(!src2->isContainedIntOrIImmed());
- intConst = src1->AsIntConCommon();
+ intConst = src1->AsIntCon();
nonIntReg = src2;
}
}
diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp
index 40c4937fe3b6f..9a603f30b15ba 100644
--- a/src/coreclr/jit/emitloongarch64.cpp
+++ b/src/coreclr/jit/emitloongarch64.cpp
@@ -6249,8 +6249,8 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// find immed (if any) - it cannot be a dst
// Only one src can be an int.
- GenTreeIntConCommon* intConst = nullptr;
- GenTree* nonIntReg = nullptr;
+ GenTreeIntCon* intConst = nullptr;
+ GenTree* nonIntReg = nullptr;
bool needCheckOv = dst->gtOverflowEx();
@@ -6269,7 +6269,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// Check src2 first as we can always allow it to be a contained immediate
if (src2->isContainedIntOrIImmed())
{
- intConst = src2->AsIntConCommon();
+ intConst = src2->AsIntCon();
nonIntReg = src1;
}
// Only for commutative operations do we check src1 and allow it to be a contained immediate
@@ -6282,7 +6282,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
if (src1->isContainedIntOrIImmed())
{
assert(!src2->isContainedIntOrIImmed());
- intConst = src1->AsIntConCommon();
+ intConst = src1->AsIntCon();
nonIntReg = src2;
}
}
diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp
index de9df4abd5c0a..bc7199c8c1d0a 100644
--- a/src/coreclr/jit/emitriscv64.cpp
+++ b/src/coreclr/jit/emitriscv64.cpp
@@ -4089,8 +4089,8 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// find immed (if any) - it cannot be a dst
// Only one src can be an int.
- GenTreeIntConCommon* intConst = nullptr;
- GenTree* nonIntReg = nullptr;
+ GenTreeIntCon* intConst = nullptr;
+ GenTree* nonIntReg = nullptr;
bool needCheckOv = dst->gtOverflowEx();
@@ -4109,7 +4109,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
// Check src2 first as we can always allow it to be a contained immediate
if (src2->isContainedIntOrIImmed())
{
- intConst = src2->AsIntConCommon();
+ intConst = src2->AsIntCon();
nonIntReg = src1;
}
// Only for commutative operations do we check src1 and allow it to be a contained immediate
@@ -4122,7 +4122,7 @@ regNumber emitter::emitInsTernary(instruction ins, emitAttr attr, GenTree* dst,
if (src1->isContainedIntOrIImmed())
{
assert(!src2->isContainedIntOrIImmed());
- intConst = src1->AsIntConCommon();
+ intConst = src1->AsIntCon();
nonIntReg = src2;
}
}
diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp
index 7e962c2e0c7cb..7de2957f6dd2a 100644
--- a/src/coreclr/jit/emitxarch.cpp
+++ b/src/coreclr/jit/emitxarch.cpp
@@ -4673,7 +4673,7 @@ void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt,
else if ((memBase != nullptr) && memBase->IsCnsIntOrI() && memBase->isContained())
{
// Absolute addresses marked as contained should fit within the base of addr mode.
- assert(memBase->AsIntConCommon()->FitsInAddrBase(emitComp));
+ assert(memBase->AsIntCon()->FitsInAddrBase(emitComp));
// If we reach here, either:
// - we are not generating relocatable code, (typically the non-AOT JIT case)
@@ -4684,9 +4684,9 @@ void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt,
// be contained.
//
assert(!emitComp->opts.compReloc || memBase->IsIconHandle() || memBase->IsIntegralConst(0) ||
- memBase->AsIntConCommon()->FitsInAddrBase(emitComp));
+ memBase->AsIntCon()->FitsInAddrBase(emitComp));
- if (memBase->AsIntConCommon()->AddrNeedsReloc(emitComp))
+ if (memBase->AsIntCon()->AddrNeedsReloc(emitComp))
{
id->idSetIsDspReloc();
}
@@ -4698,7 +4698,7 @@ void emitter::emitHandleMemOp(GenTreeIndir* indir, instrDesc* id, insFormat fmt,
id->idInsFmt(emitMapFmtForIns(fmt, ins));
// Absolute address must have already been set in the instrDesc constructor.
- assert(emitGetInsAmdAny(id) == memBase->AsIntConCommon()->IconValue());
+ assert(emitGetInsAmdAny(id) == memBase->AsIntCon()->IconValue());
}
else
{
@@ -4851,7 +4851,7 @@ void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* m
{
if (data->isContainedIntOrIImmed())
{
- emitIns_C_I(ins, attr, addr->AsClsVar()->gtClsVarHnd, 0, (int)data->AsIntConCommon()->IconValue());
+ emitIns_C_I(ins, attr, addr->AsClsVar()->gtClsVarHnd, 0, (int)data->AsIntCon()->IconValue());
}
#if defined(FEATURE_HW_INTRINSICS)
else if (data->OperIsHWIntrinsic() && data->isContained())
@@ -4869,7 +4869,7 @@ void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* m
{
assert(numArgs == 2);
- int icon = static_cast<int>(hwintrinsic->Op(2)->AsIntConCommon()->IconValue());
+ int icon = static_cast<int>(hwintrinsic->Op(2)->AsIntCon()->IconValue());
emitIns_C_R_I(ins, attr, addr->AsClsVar()->gtClsVarHnd, 0, op1->GetRegNum(), icon);
}
}
@@ -4889,7 +4889,7 @@ void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* m
if (data->isContainedIntOrIImmed())
{
- emitIns_S_I(ins, attr, varNode->GetLclNum(), offset, (int)data->AsIntConCommon()->IconValue());
+ emitIns_S_I(ins, attr, varNode->GetLclNum(), offset, (int)data->AsIntCon()->IconValue());
}
#if defined(FEATURE_HW_INTRINSICS)
else if (data->OperIsHWIntrinsic() && data->isContained())
@@ -4907,7 +4907,7 @@ void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* m
{
assert(numArgs == 2);
- int icon = static_cast<int>(hwintrinsic->Op(2)->AsIntConCommon()->IconValue());
+ int icon = static_cast<int>(hwintrinsic->Op(2)->AsIntCon()->IconValue());
emitIns_S_R_I(ins, attr, varNode->GetLclNum(), offset, op1->GetRegNum(), icon);
}
}
@@ -4929,7 +4929,7 @@ void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* m
if (data->isContainedIntOrIImmed())
{
- int icon = (int)data->AsIntConCommon()->IconValue();
+ int icon = (int)data->AsIntCon()->IconValue();
id = emitNewInstrAmdCns(attr, offset, icon);
id->idIns(ins);
emitHandleMemOp(mem, id, emitInsModeFormat(ins, IF_ARD_CNS), ins);
@@ -4956,7 +4956,7 @@ void emitter::emitInsStoreInd(instruction ins, emitAttr attr, GenTreeStoreInd* m
else
{
assert(numArgs == 2);
- int icon = static_cast<int>(hwintrinsic->Op(2)->AsIntConCommon()->IconValue());
+ int icon = static_cast<int>(hwintrinsic->Op(2)->AsIntCon()->IconValue());
id = emitNewInstrAmdCns(attr, offset, icon);
id->idIns(ins);
@@ -5001,7 +5001,7 @@ void emitter::emitInsStoreLcl(instruction ins, emitAttr attr, GenTreeLclVarCommo
if (data->isContainedIntOrIImmed())
{
- emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntConCommon()->IconValue());
+ emitIns_S_I(ins, attr, varNode->GetLclNum(), 0, (int)data->AsIntCon()->IconValue());
}
else
{
@@ -5169,7 +5169,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// src is an contained immediate
// dst is a class static variable
emitIns_C_I(ins, attr, memBase->AsClsVar()->gtClsVarHnd, 0,
- (int)src->AsIntConCommon()->IconValue());
+ (int)src->AsIntCon()->IconValue());
}
else
{
@@ -5195,7 +5195,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
assert(otherOp == nullptr);
assert(src->IsCnsIntOrI());
- id = emitNewInstrAmdCns(attr, memIndir->Offset(), (int)src->AsIntConCommon()->IconValue());
+ id = emitNewInstrAmdCns(attr, memIndir->Offset(), (int)src->AsIntCon()->IconValue());
}
else
{
@@ -5278,7 +5278,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
assert(cnsOp == src);
assert(otherOp == nullptr);
- sz = emitInsSizeAM(id, insCodeMI(ins), (int)src->AsIntConCommon()->IconValue());
+ sz = emitInsSizeAM(id, insCodeMI(ins), (int)src->AsIntCon()->IconValue());
}
else
{
@@ -5359,7 +5359,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// src is an contained immediate
// dst is a stack based local variable
- emitIns_S_I(ins, attr, varNum, offset, (int)src->AsIntConCommon()->IconValue());
+ emitIns_S_I(ins, attr, varNum, offset, (int)src->AsIntCon()->IconValue());
}
else
{
@@ -5380,7 +5380,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
if (src->IsCnsIntOrI())
{
assert(!dst->isContained());
- GenTreeIntConCommon* intCns = src->AsIntConCommon();
+ GenTreeIntCon* intCns = src->AsIntCon();
emitIns_R_I(ins, attr, dst->GetRegNum(), intCns->IconValue());
}
else
@@ -5450,8 +5450,8 @@ void emitter::emitInsRMW(instruction ins, emitAttr attr, GenTreeStoreInd* storeI
if (src->isContainedIntOrIImmed())
{
- GenTreeIntConCommon* intConst = src->AsIntConCommon();
- int iconVal = (int)intConst->IconValue();
+ GenTreeIntCon* intConst = src->AsIntCon();
+ int iconVal = (int)intConst->IconValue();
switch (ins)
{
case INS_rcl_N:
diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp
index 835c6f401fe9b..b1590d972a0b4 100644
--- a/src/coreclr/jit/fgdiagnostic.cpp
+++ b/src/coreclr/jit/fgdiagnostic.cpp
@@ -386,7 +386,7 @@ void Compiler::fgDumpTree(FILE* fgxFile, GenTree* const tree)
}
else if (tree->IsCnsIntOrI())
{
- fprintf(fgxFile, "%d", tree->AsIntCon()->gtIconVal);
+ fprintf(fgxFile, "%d", tree->AsIntCon()->IconValue());
}
else if (tree->IsCnsFltOrDbl())
{
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index 7c40d037d8a99..ffb3a875b676a 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -661,7 +661,7 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitor<SubstitutePlaceholdersAndDevirtualizeWalker>
GenTree* condTree = tree->AsOp()->gtOp1;
assert(tree == block->lastStmt()->GetRootNode());
- if (condTree->OperGet() == GT_CNS_INT)
+ if (condTree->IsCnsIntOrI())
{
JITDUMP(" ... found foldable jtrue at [%06u] in " FMT_BB "\n", m_compiler->dspTreeID(tree),
block->bbNum);
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 38c04eb9e5256..a4a384c5dd638 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -916,12 +916,12 @@ bool Compiler::fgAddrCouldBeNull(GenTree* addr)
return !addr->IsHelperCall() || !s_helperCallProperties.NonNullReturn(addr->AsCall()->GetHelperNum());
case GT_ADD:
- if (addr->AsOp()->gtOp1->gtOper == GT_CNS_INT)
+ if (addr->AsOp()->gtOp1->IsCnsIntOrI())
{
GenTree* cns1Tree = addr->AsOp()->gtOp1;
if (!cns1Tree->IsIconHandle())
{
- if (!fgIsBigOffset(cns1Tree->AsIntCon()->gtIconVal))
+ if (!fgIsBigOffset(cns1Tree->AsIntCon()->IconValue()))
{
// Op1 was an ordinary small constant
return fgAddrCouldBeNull(addr->AsOp()->gtOp2);
@@ -930,13 +930,13 @@ bool Compiler::fgAddrCouldBeNull(GenTree* addr)
else // Op1 was a handle represented as a constant
{
// Is Op2 also a constant?
- if (addr->AsOp()->gtOp2->gtOper == GT_CNS_INT)
+ if (addr->AsOp()->gtOp2->IsCnsIntOrI())
{
GenTree* cns2Tree = addr->AsOp()->gtOp2;
// Is this an addition of a handle and constant
if (!cns2Tree->IsIconHandle())
{
- if (!fgIsBigOffset(cns2Tree->AsIntCon()->gtIconVal))
+ if (!fgIsBigOffset(cns2Tree->AsIntCon()->IconValue()))
{
// Op2 was an ordinary small constant
return false; // we can't have a null address
@@ -948,13 +948,13 @@ bool Compiler::fgAddrCouldBeNull(GenTree* addr)
else
{
// Op1 is not a constant. What about Op2?
- if (addr->AsOp()->gtOp2->gtOper == GT_CNS_INT)
+ if (addr->AsOp()->gtOp2->IsCnsIntOrI())
{
GenTree* cns2Tree = addr->AsOp()->gtOp2;
// Is this an addition of a small constant
if (!cns2Tree->IsIconHandle())
{
- if (!fgIsBigOffset(cns2Tree->AsIntCon()->gtIconVal))
+ if (!fgIsBigOffset(cns2Tree->AsIntCon()->IconValue()))
{
// Op2 was an ordinary small constant
return fgAddrCouldBeNull(addr->AsOp()->gtOp1);
@@ -1012,7 +1012,7 @@ GenTree* Compiler::fgOptimizeDelegateConstructor(GenTreeCall* call,
assert(targetMethod->AsCall()->gtArgs.CountArgs() == 3);
GenTree* handleNode = targetMethod->AsCall()->gtArgs.GetArgByIndex(2)->GetNode();
- if (handleNode->OperGet() == GT_CNS_INT)
+ if (handleNode->IsCnsIntOrI())
{
// it's a ldvirtftn case, fetch the methodhandle off the helper for ldvirtftn. It's the 3rd arg
targetMethodHnd = CORINFO_METHOD_HANDLE(handleNode->AsIntCon()->gtCompileTimeHandle);
@@ -1050,7 +1050,7 @@ GenTree* Compiler::fgOptimizeDelegateConstructor(GenTreeCall* call,
// This could be any of CORINFO_HELP_RUNTIMEHANDLE_(METHOD|CLASS)(_LOG?)
GenTree* tokenNode = runtimeLookupCall->gtArgs.GetArgByIndex(1)->GetNode();
- noway_assert(tokenNode->OperGet() == GT_CNS_INT);
+ noway_assert(tokenNode->IsCnsIntOrI());
targetMethodHnd = CORINFO_METHOD_HANDLE(tokenNode->AsIntCon()->gtCompileTimeHandle);
}
@@ -2161,7 +2161,7 @@ class MergedReturns
// Return Value:
// The new merged return block.
//
- BasicBlock* CreateReturnBB(unsigned index, GenTreeIntConCommon* returnConst = nullptr)
+ BasicBlock* CreateReturnBB(unsigned index, GenTreeIntCon* returnConst = nullptr)
{
BasicBlock* newReturnBB = comp->fgNewBBinRegion(BBJ_RETURN);
comp->fgReturnCount++;
@@ -2285,7 +2285,7 @@ class MergedReturns
// Check to see if this is a constant return so that we can search
// for and/or create a constant return block for it.
- GenTreeIntConCommon* retConst = GetReturnConst(returnBlock);
+ GenTreeIntCon* retConst = GetReturnConst(returnBlock);
if (retConst != nullptr)
{
// We have a constant. Now find or create a corresponding return block.
@@ -2386,7 +2386,7 @@ class MergedReturns
//------------------------------------------------------------------------
// GetReturnConst: If the given block returns an integral constant, return the
- // GenTreeIntConCommon that represents the constant.
+ // GenTreeIntCon that represents the constant.
//
// Arguments:
// returnBlock - Block whose return value is to be inspected.
@@ -2395,7 +2395,7 @@ class MergedReturns
// GenTreeIntCommon that is the argument of `returnBlock`'s `GT_RETURN` if
// such exists; nullptr otherwise.
//
- static GenTreeIntConCommon* GetReturnConst(BasicBlock* returnBlock)
+ static GenTreeIntCon* GetReturnConst(BasicBlock* returnBlock)
{
Statement* lastStmt = returnBlock->lastStmt();
if (lastStmt == nullptr)
@@ -2415,7 +2415,7 @@ class MergedReturns
return nullptr;
}
- return retExpr->AsIntConCommon();
+ return retExpr->AsIntCon();
}
//------------------------------------------------------------------------
@@ -2433,7 +2433,7 @@ class MergedReturns
// Return Value:
// A block that returns the same constant, if one is found; otherwise nullptr.
//
- BasicBlock* FindConstReturnBlock(GenTreeIntConCommon* constExpr, unsigned searchLimit, unsigned* index)
+ BasicBlock* FindConstReturnBlock(GenTreeIntCon* constExpr, unsigned searchLimit, unsigned* index)
{
INT64 constVal = constExpr->IntegralValue();
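
Reviewer note: sketch of how the merged-return machinery consumes the narrower type (control flow condensed; returnBlock is whatever block is being inspected):

    GenTreeIntCon* retConst = GetReturnConst(returnBlock); // nullptr if not constant
    if (retConst != nullptr)
    {
        // Keying on the full 64-bit value keeps TYP_LONG returns distinct.
        INT64 constVal = retConst->IntegralValue();
        // ... FindConstReturnBlock(retConst, searchLimit, &index) ...
    }
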
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index ea7cbf1f7e6d5..e1e0932df474c 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -279,10 +279,8 @@ void GenTree::InitNodeSize()
static_assert_no_msg(sizeof(GenTreeUnOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeOp) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeVal) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeIntConCommon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreePhysReg) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeIntCon) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeLngCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeDblCon) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStrCon) <= TREE_NODE_SZ_SMALL);
#ifdef TARGET_XARCH
@@ -2334,7 +2332,7 @@ bool GenTreeCall::HasSideEffects(Compiler* compiler, bool ignoreExceptions, bool
// if arrLen is nullptr it means it wasn't an array allocator
if ((arrLen != nullptr) && arrLen->IsIntCnsFitsInI32())
{
- ssize_t cns = arrLen->AsIntConCommon()->IconValue();
+ ssize_t cns = arrLen->AsIntCon()->IconValue();
if ((cns >= 0) && (cns <= CORINFO_Array_MaxLength))
{
return false;
@@ -2695,7 +2693,14 @@ bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
switch (oper)
{
case GT_CNS_INT:
- if (op1->AsIntCon()->gtIconVal == op2->AsIntCon()->gtIconVal)
+#ifndef TARGET_64BIT
+ // TODO-CnsLng: delete this zero-diff quirk.
+ if (op1->TypeIs(TYP_LONG))
+ {
+ return false;
+ }
+#endif // !TARGET_64BIT
+ if (op1->AsIntCon()->IconValue() == op2->AsIntCon()->IconValue())
{
return true;
}
@@ -2719,13 +2724,8 @@ bool GenTree::Compare(GenTree* op1, GenTree* op2, bool swapOK)
}
#if 0
- // TODO-CQ: Enable this in the future
- case GT_CNS_LNG:
- if (op1->AsLngCon()->gtLconVal == op2->AsLngCon()->gtLconVal)
- return true;
- break;
-
case GT_CNS_DBL:
+ // TODO-CQ: Enable this in the future
if (op1->AsDblCon()->DconValue() == op2->AsDblCon()->DconValue())
return true;
break;
@@ -3231,10 +3231,7 @@ unsigned Compiler::gtHashValue(GenTree* tree)
break;
case GT_CNS_INT:
- add = tree->AsIntCon()->gtIconVal;
- break;
- case GT_CNS_LNG:
- bits = (UINT64)tree->AsLngCon()->gtLconVal;
+ bits = (UINT64)tree->AsIntCon()->IntegralValue();
#ifdef HOST_64BIT
add = bits;
#else // 32-bit host
@@ -3745,7 +3742,7 @@ bool GenTreeOp::IsValidLongMul()
}
if (!(op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) &&
- !(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())))
+ !(op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntCon()->IntegralValue())))
{
return false;
}
@@ -3776,7 +3773,7 @@ bool GenTreeOp::IsValidLongMul()
return IsUnsigned() ? static_cast<int64_t>(UINT64_MAX) : INT32_MIN;
}
- return op->AsIntConCommon()->IntegralValue();
+ return op->AsIntCon()->IntegralValue();
};
int64_t maxOp1 = getMaxValue(op1);
@@ -3789,8 +3786,8 @@ bool GenTreeOp::IsValidLongMul()
}
// Both operands must extend the same way.
- bool op1ZeroExtends = op1->IsUnsigned();
- bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
+ bool op1ZeroExtends = op1->IsUnsigned();
+ bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntCon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
if ((op1ZeroExtends != op2ZeroExtends) && !op2AnyExtensionIsSuitable)
{
@@ -3833,12 +3830,12 @@ void GenTreeOp::DebugCheckLongMul()
// op2 has to be CAST(long <- int) or a suitably small constant.
assert((op2->OperIs(GT_CAST) && genActualTypeIsInt(op2->AsCast()->CastOp())) ||
- (op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue())));
+ (op2->IsIntegralConst() && FitsIn<int32_t>(op2->AsIntCon()->IntegralValue())));
assert(!op2->gtOverflowEx());
// Both operands must extend the same way.
- bool op1ZeroExtends = op1->IsUnsigned();
- bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntConCommon()->IntegralValue() >= 0;
+ bool op1ZeroExtends = op1->IsUnsigned();
+ bool op2ZeroExtends = op2->OperIs(GT_CAST) ? op2->IsUnsigned() : op2->AsIntCon()->IntegralValue() >= 0;
bool op2AnyExtensionIsSuitable = op2->IsIntegralConst() && op2ZeroExtends;
assert((op1ZeroExtends == op2ZeroExtends) || op2AnyExtensionIsSuitable);
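
// A minimal standalone sketch (not part of the patch) of why IsValidLongMul and
// DebugCheckLongMul insist that both operands extend the same way: a decomposed
// 32x32->64 multiply maps to either a signed (smull-style) or an unsigned
// (umull-style) widening multiply, and the two agree on a constant operand only
// when its value is non-negative. Plain C++ below, not JIT types.
#include <cassert>
#include <cstdint>

int main()
{
    int32_t negative = -2;
    // The signed multiply sees -2; the unsigned one sees 0xFFFFFFFE.
    assert(static_cast<int64_t>(negative) != static_cast<int64_t>(static_cast<uint32_t>(negative)));

    int32_t nonNegative = 3;
    // For non-negative values sign- and zero-extension coincide, so the constant
    // is compatible with either form ("op2AnyExtensionIsSuitable" above).
    assert(static_cast<int64_t>(nonNegative) == static_cast<int64_t>(static_cast<uint32_t>(nonNegative)));
    return 0;
}
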
@@ -4971,77 +4968,77 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
costEx = 2;
goto COMMON_CNS;
- case GT_CNS_LNG:
+ case GT_CNS_INT:
{
- GenTreeIntConCommon* con = tree->AsIntConCommon();
-
- INT64 lngVal = con->LngValue();
- INT32 loVal = (INT32)(lngVal & 0xffffffff);
- INT32 hiVal = (INT32)(lngVal >> 32);
+ GenTreeIntCon* con = tree->AsIntCon();
- if (lngVal == 0)
- {
- costSz = 1;
- costEx = 1;
- }
- else
+ if (con->TypeIs(TYP_LONG))
{
- // Minimum of one instruction to setup hiVal,
- // and one instruction to setup loVal
- costSz = 4 + 4;
- costEx = 1 + 1;
+ INT64 lngVal = con->LngValue();
+ INT32 loVal = (INT32)(lngVal & 0xffffffff);
+ INT32 hiVal = (INT32)(lngVal >> 32);
- if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
- !codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
+ if (lngVal == 0)
{
- // Needs extra instruction: movw/movt
- costSz += 4;
- costEx += 1;
+ costSz = 1;
+ costEx = 1;
}
-
- if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
- !codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
+ else
{
- // Needs extra instruction: movw/movt
- costSz += 4;
- costEx += 1;
- }
- }
- goto COMMON_CNS;
- }
+ // Minimum of one instruction to setup hiVal,
+ // and one instruction to setup loVal
+ costSz = 4 + 4;
+ costEx = 1 + 1;
- case GT_CNS_INT:
- {
- // If the constant is a handle then it will need to have a relocation
- // applied to it.
- // Any constant that requires a reloc must use the movw/movt sequence
- //
- GenTreeIntConCommon* con = tree->AsIntConCommon();
- target_ssize_t conVal = (target_ssize_t)con->IconValue();
+ if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) &&
+ !codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal))
+ {
+ // Needs extra instruction: movw/movt
+ costSz += 4;
+ costEx += 1;
+ }
- if (con->ImmedValNeedsReloc(this))
- {
- // Requires movw/movt
- costSz = 8;
- costEx = 2;
- }
- else if (codeGen->validImmForInstr(INS_add, conVal))
- {
- // Typically included with parent oper
- costSz = 2;
- costEx = 1;
- }
- else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
- {
- // Uses mov or mvn
- costSz = 4;
- costEx = 1;
+ if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) &&
+ !codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal))
+ {
+ // Needs extra instruction: movw/movt
+ costSz += 4;
+ costEx += 1;
+ }
+ }
}
else
{
- // Needs movw/movt
- costSz = 8;
- costEx = 2;
+ // If the constant is a handle then it will need to have a relocation
+ // applied to it.
+ // Any constant that requires a reloc must use the movw/movt sequence
+ //
+ target_ssize_t conVal = (target_ssize_t)con->IconValue();
+
+ if (con->ImmedValNeedsReloc(this))
+ {
+ // Requires movw/movt
+ costSz = 8;
+ costEx = 2;
+ }
+ else if (codeGen->validImmForInstr(INS_add, conVal))
+ {
+ // Typically included with parent oper
+ costSz = 2;
+ costEx = 1;
+ }
+ else if (codeGen->validImmForInstr(INS_mov, conVal) || codeGen->validImmForInstr(INS_mvn, conVal))
+ {
+ // Uses mov or mvn
+ costSz = 4;
+ costEx = 1;
+ }
+ else
+ {
+ // Needs movw/movt
+ costSz = 8;
+ costEx = 2;
+ }
}
goto COMMON_CNS;
}
@@ -5058,21 +5055,17 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
#endif
goto COMMON_CNS;
- case GT_CNS_LNG:
case GT_CNS_INT:
{
- GenTreeIntConCommon* con = tree->AsIntConCommon();
- ssize_t conVal = (oper == GT_CNS_LNG) ? (ssize_t)con->LngValue() : con->IconValue();
- bool fitsInVal = true;
+ GenTreeIntCon* con = tree->AsIntCon();
+ ssize_t conVal = (ssize_t)con->IntegralValue();
+ bool fitsInVal = true;
#ifdef TARGET_X86
- if (oper == GT_CNS_LNG)
+ if (tree->TypeIs(TYP_LONG))
{
- INT64 lngVal = con->LngValue();
-
- conVal = (ssize_t)lngVal; // truncate to 32-bits
-
- fitsInVal = ((INT64)conVal == lngVal);
+ // TODO-Bug: this code's behavior is dependent on the host's bitness.
+ fitsInVal = (conVal == con->LngValue());
}
#endif // TARGET_X86
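
// A small sketch (not part of the patch) of the TODO-Bug noted above: "conVal" is an
// ssize_t, which is the host's pointer-sized integer, so the truncation feeding
// "fitsInVal" happens on a 32-bit host but not on a 64-bit host cross-compiling for
// x86. int32_t and int64_t stand in for the two possible widths of ssize_t.
#include <cstdint>
#include <iostream>

int main()
{
    int64_t lngVal = 0x100000001LL; // does not fit in 32 bits

    int32_t conValOn32BitHost = static_cast<int32_t>(lngVal); // truncates
    int64_t conValOn64BitHost = lngVal;                       // preserved

    // 32-bit host: the round-trip fails, so the constant is correctly costed as large.
    std::cout << (static_cast<int64_t>(conValOn32BitHost) == lngVal) << '\n'; // 0
    // 64-bit host: the comparison succeeds, so fitsInVal is (wrongly) true.
    std::cout << (conValOn64BitHost == lngVal) << '\n'; // 1
    return 0;
}
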
@@ -5086,13 +5079,13 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
costSz = 4;
costEx = 1;
}
- else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal))
+            else if (fitsInVal && FitsIn<int8_t>(conVal))
{
costSz = 1;
costEx = 1;
}
#ifdef TARGET_AMD64
- else if (!GenTreeIntConCommon::FitsInI32(conVal))
+            else if (!FitsIn<int32_t>(conVal))
{
costSz = 10;
costEx = 2;
@@ -5109,7 +5102,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
costEx = 1;
}
#ifdef TARGET_X86
- if (oper == GT_CNS_LNG)
+ if (tree->TypeIs(TYP_LONG))
{
costSz += fitsInVal ? 1 : 4;
costEx += 1;
@@ -5120,15 +5113,17 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
}
#elif defined(TARGET_ARM64)
-
case GT_CNS_STR:
- case GT_CNS_LNG:
+ costSz = 8;
+ costEx = 2;
+ goto COMMON_CNS;
+
case GT_CNS_INT:
{
- GenTreeIntConCommon* con = tree->AsIntConCommon();
- bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
- INT64 imm = con->LngValue();
- emitAttr size = EA_SIZE(emitActualTypeSize(tree));
+ GenTreeIntCon* con = tree->AsIntCon();
+ bool iconNeedsReloc = con->ImmedValNeedsReloc(this);
+ INT64 imm = con->IntegralValue();
+ emitAttr size = EA_SIZE(emitActualTypeSize(tree));
if (iconNeedsReloc)
{
@@ -5179,8 +5174,8 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
costEx = instructionCount;
costSz = 4 * instructionCount;
}
- }
goto COMMON_CNS;
+ }
#elif defined(TARGET_LOONGARCH64)
// TODO-LoongArch64-CQ: tune the costs.
@@ -5189,11 +5184,11 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
costSz = 4;
goto COMMON_CNS;
- case GT_CNS_LNG:
case GT_CNS_INT:
costEx = 1;
costSz = 4;
goto COMMON_CNS;
+
#elif defined(TARGET_RISCV64)
// TODO-RISCV64-CQ: tune the costs.
case GT_CNS_STR:
@@ -5201,27 +5196,16 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
costSz = 4;
goto COMMON_CNS;
- case GT_CNS_LNG:
case GT_CNS_INT:
costEx = 1;
costSz = 4;
goto COMMON_CNS;
#else
case GT_CNS_STR:
- case GT_CNS_LNG:
case GT_CNS_INT:
#error "Unknown TARGET"
#endif
COMMON_CNS:
- /*
- Note that some code below depends on constants always getting
- moved to be the second operand of a binary operator. This is
- easily accomplished by giving constants a level of 0, which
- we do on the next line. If you ever decide to change this, be
- aware that unless you make other arrangements for integer
- constants to be moved, stuff will break.
- */
-
level = 0;
break;
@@ -5694,7 +5678,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
if (op2->IsCnsIntOrI())
{
- size_t ival = op2->AsIntConCommon()->IconValue();
+ size_t ival = op2->AsIntCon()->IconValue();
if (ival > 0 && ival == genFindLowestBit(ival))
{
@@ -6091,7 +6075,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
if (call->gtCallType == CT_INDIRECT)
{
// pinvoke-calli cookie is a constant, or constant indirection
- assert(call->gtCallCookie == nullptr || call->gtCallCookie->gtOper == GT_CNS_INT ||
+ assert(call->gtCallCookie == nullptr || call->gtCallCookie->IsCnsIntOrI() ||
call->gtCallCookie->gtOper == GT_IND);
GenTree* indirect = call->gtCallAddr;
@@ -6340,9 +6324,9 @@ bool GenTree::OperSupportsReverseOpEvalOrder(Compiler* comp) const
unsigned GenTree::GetScaleIndexMul()
{
- if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntConCommon()->IconValue()) && AsIntConCommon()->IconValue() != 1)
+ if (IsCnsIntOrI() && jitIsScaleIndexMul(AsIntCon()->IconValue()) && AsIntCon()->IconValue() != 1)
{
- return (unsigned)AsIntConCommon()->IconValue();
+ return (unsigned)AsIntCon()->IconValue();
}
return 0;
@@ -6358,9 +6342,9 @@ unsigned GenTree::GetScaleIndexMul()
unsigned GenTree::GetScaleIndexShf()
{
- if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntConCommon()->IconValue()))
+ if (IsCnsIntOrI() && jitIsScaleIndexShift(AsIntCon()->IconValue()))
{
- return (unsigned)(1 << AsIntConCommon()->IconValue());
+ return (unsigned)(1 << AsIntCon()->IconValue());
}
return 0;
@@ -6423,7 +6407,6 @@ bool GenTree::TryGetUse(GenTree* operand, GenTree*** pUse)
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
- case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_CNS_VEC:
@@ -6823,7 +6806,7 @@ bool GenTree::OperRequiresCallFlag(Compiler* comp) const
// could mark the trees just before argument processing, but it would require a full
// tree walk of the argument tree, so we just do it when morphing, instead, even though we'll
// mark non-argument trees (that will still get converted to calls, anyway).
- return (this->TypeGet() == TYP_LONG) && (gtGetOp2()->OperGet() != GT_CNS_INT);
+ return (this->TypeGet() == TYP_LONG) && !gtGetOp2()->IsIntegralConst();
#endif // FEATURE_FIXED_OUT_ARGS && !TARGET_64BIT
default:
@@ -7341,7 +7324,7 @@ GenTree::VtablePtr GenTree::GetVtableForOper(genTreeOps oper)
assert(!IsExOp(opKind));
assert(OperIsSimple(oper) || OperIsLeaf(oper));
// Need to provide non-null operands.
- GenTreeIntCon dummyOp(TYP_INT, 0);
+ GenTreeIntCon dummyOp(TYP_INT, 0, nullptr);
GenTreeOp gt(oper, TYP_INT, &dummyOp, ((opKind & GTK_UNOP) ? nullptr : &dummyOp));
s_vtableForOp = *reinterpret_cast<VtablePtr*>(&gt);
}
@@ -7401,7 +7384,7 @@ GenTreeQmark* Compiler::gtNewQmarkNode(var_types type, GenTree* cond, GenTreeCol
GenTreeIntCon* Compiler::gtNewIconNode(ssize_t value, var_types type)
{
assert(genActualType(type) == type);
- return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
+ return new (this, GT_CNS_INT) GenTreeIntCon(type, value, nullptr);
}
GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeq* fieldSeq)
@@ -7632,13 +7615,9 @@ GenTreeIntCon* Compiler::gtNewStringLiteralLength(GenTreeStrCon* node)
/*****************************************************************************/
-GenTree* Compiler::gtNewLconNode(__int64 value)
+GenTree* Compiler::gtNewLconNode(int64_t value)
{
-#ifdef TARGET_64BIT
- GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value);
-#else
- GenTree* node = new (this, GT_CNS_LNG) GenTreeLngCon(value);
-#endif
+ GenTree* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_LONG, value, nullptr);
return node;
}
@@ -8580,7 +8559,7 @@ void GenTreeIntCon::FixupInitBlkValue(var_types type)
unsigned size = genTypeSize(type);
if (size > 1)
{
- size_t cns = gtIconVal;
+ size_t cns = IconValue();
cns = cns & 0xFF;
cns |= cns << 8;
if (size >= 4)
@@ -8600,7 +8579,7 @@ void GenTreeIntCon::FixupInitBlkValue(var_types type)
assert(!varTypeIsGC(type) || (cns == 0));
}
- gtIconVal = cns;
+ SetIconValue(cns);
}
}
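
// A standalone model (not part of the patch) of the byte replication FixupInitBlkValue
// performs above: an init-block constant is a fill byte, and for 2/4/8-byte element
// types the low byte is broadcast across the value, memset-style. The helper name is
// made up for the example; the shift cascade mirrors the code in the hunk.
#include <cassert>
#include <cstdint>

static uint64_t ReplicateFillByte(uint64_t cns, unsigned size)
{
    cns &= 0xFF;     // keep only the fill byte
    cns |= cns << 8; // 2 bytes
    if (size >= 4)
    {
        cns |= cns << 16; // 4 bytes
        if (size == 8)
        {
            cns |= cns << 32; // 8 bytes
        }
    }
    return cns;
}

int main()
{
    assert(ReplicateFillByte(0xAB, 2) == 0xABAB);
    assert(ReplicateFillByte(0xAB, 4) == 0xABABABABu);
    assert(ReplicateFillByte(0xAB, 8) == 0xABABABABABABABABull);
    return 0;
}
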
@@ -8764,7 +8743,7 @@ void GenTreeOp::CheckDivideByConstOptimized(Compiler* comp)
// Now set DONT_CSE on the GT_CNS_INT divisor, note that
// with ValueNumbering we can have a non GT_CNS_INT divisor
GenTree* divisor = gtGetOp2()->gtEffectiveVal(/*commaOnly*/ true);
- if (divisor->OperIs(GT_CNS_INT))
+ if (divisor->IsCnsIntOrI())
{
divisor->gtFlags |= GTF_DONT_CSE;
}
@@ -8798,7 +8777,7 @@ GenTree* Compiler::gtNewPutArgReg(var_types type, GenTree* arg, regNumber argReg
node->AsMultiRegOp()->gtOtherReg = REG_NEXT(argReg);
}
#else
- node = gtNewOperNode(GT_PUTARG_REG, type, arg);
+ node = gtNewOperNode(GT_PUTARG_REG, type, arg);
#endif
node->SetRegNum(argReg);
@@ -8829,7 +8808,7 @@ GenTree* Compiler::gtNewBitCastNode(var_types type, GenTree* arg)
// A BITCAST could be a MultiRegOp on arm since we could move a double register to two int registers.
node = new (this, GT_BITCAST) GenTreeMultiRegOp(GT_BITCAST, type, arg, nullptr);
#else
- node = gtNewOperNode(GT_BITCAST, type, arg);
+ node = gtNewOperNode(GT_BITCAST, type, arg);
#endif
return node;
@@ -8932,7 +8911,7 @@ GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
#if defined(LATE_DISASM)
if (tree->IsIconHandle())
{
- copy = gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
+ copy = gtNewIconHandleNode(tree->AsIntCon()->IconValue(), tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
@@ -8940,15 +8919,14 @@ GenTree* Compiler::gtClone(GenTree* tree, bool complexOK)
#endif
{
copy = new (this, GT_CNS_INT)
- GenTreeIntCon(tree->gtType, tree->AsIntCon()->gtIconVal, tree->AsIntCon()->gtFieldSeq);
+ GenTreeIntCon(tree->TypeGet(), tree->AsIntCon()->IntegralValue(), tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
+#ifdef DEBUG
+ copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
+#endif
}
break;
- case GT_CNS_LNG:
- copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
- break;
-
case GT_CNS_DBL:
{
copy = gtNewDconNode(tree->AsDblCon()->DconValue(), tree->TypeGet());
@@ -9118,31 +9096,25 @@ GenTree* Compiler::gtCloneExpr(
if (tree->IsIconHandle())
{
copy =
- gtNewIconHandleNode(tree->AsIntCon()->gtIconVal, tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
+ gtNewIconHandleNode(tree->AsIntCon()->IconValue(), tree->gtFlags, tree->AsIntCon()->gtFieldSeq);
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
copy->gtType = tree->gtType;
}
else
#endif
{
- copy = gtNewIconNode(tree->AsIntCon()->gtIconVal, tree->gtType);
+ copy = new (this, GT_CNS_INT)
+ GenTreeIntCon(tree->TypeGet(), tree->AsIntCon()->IntegralValue(), tree->AsIntCon()->gtFieldSeq);
+ copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
#ifdef DEBUG
copy->AsIntCon()->gtTargetHandle = tree->AsIntCon()->gtTargetHandle;
#endif
- copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
- copy->AsIntCon()->gtFieldSeq = tree->AsIntCon()->gtFieldSeq;
}
goto DONE;
- case GT_CNS_LNG:
- copy = gtNewLconNode(tree->AsLngCon()->gtLconVal);
- goto DONE;
-
case GT_CNS_DBL:
- {
copy = gtNewDconNode(tree->AsDblCon()->DconValue(), tree->TypeGet());
goto DONE;
- }
case GT_CNS_STR:
copy = gtNewSconNode(tree->AsStrCon()->gtSconCPX, tree->AsStrCon()->gtScpHnd);
@@ -10075,7 +10047,6 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node)
case GT_FTN_ADDR:
case GT_RET_EXPR:
case GT_CNS_INT:
- case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_CNS_VEC:
@@ -11833,20 +11804,26 @@ void Compiler::gtDispConst(GenTree* tree)
case GT_CNS_INT:
if (tree->IsIconHandle(GTF_ICON_STR_HDL))
{
- printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->gtIconVal));
+ printf(" 0x%X [ICON_STR_HDL]", dspPtr(tree->AsIntCon()->IconValue()));
}
else if (tree->IsIconHandle(GTF_ICON_OBJ_HDL))
{
- eePrintObjectDescription(" ", (CORINFO_OBJECT_HANDLE)tree->AsIntCon()->gtIconVal);
+ eePrintObjectDescription(" ", (CORINFO_OBJECT_HANDLE)tree->AsIntCon()->IconValue());
}
+#ifndef TARGET_64BIT
+ else if (tree->TypeIs(TYP_LONG))
+ {
+ printf(" 0x%016I64x", tree->AsIntCon()->IntegralValue());
+ }
+#endif // !TARGET_64BIT
else
{
ssize_t dspIconVal =
- tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->gtIconVal) : tree->AsIntCon()->gtIconVal;
+ tree->IsIconHandle() ? dspPtr(tree->AsIntCon()->IconValue()) : tree->AsIntCon()->IconValue();
if (tree->TypeGet() == TYP_REF)
{
- if (tree->AsIntCon()->gtIconVal == 0)
+ if (tree->AsIntCon()->IconValue() == 0)
{
printf(" null");
}
@@ -11856,12 +11833,12 @@ void Compiler::gtDispConst(GenTree* tree)
printf(" 0x%llx", dspIconVal);
}
}
- else if ((tree->AsIntCon()->gtIconVal > -1000) && (tree->AsIntCon()->gtIconVal < 1000))
+ else if ((tree->AsIntCon()->IconValue() > -1000) && (tree->AsIntCon()->IconValue() < 1000))
{
printf(" %ld", dspIconVal);
}
#ifdef TARGET_64BIT
- else if ((tree->AsIntCon()->gtIconVal & 0xFFFFFFFF00000000LL) != 0)
+ else if ((tree->AsIntCon()->IconValue() & 0xFFFFFFFF00000000LL) != 0)
{
if (dspIconVal >= 0)
{
@@ -11967,10 +11944,6 @@ void Compiler::gtDispConst(GenTree* tree)
}
break;
- case GT_CNS_LNG:
- printf(" 0x%016I64x", tree->AsLngCon()->gtLconVal);
- break;
-
case GT_CNS_DBL:
{
double dcon = tree->AsDblCon()->DconValue();
@@ -13635,7 +13608,7 @@ GenTree* Compiler::gtFoldExprCompare(GenTree* tree)
cons->gtPrev = tree->gtPrev;
}
- JITDUMP("Bashed to %s:\n", cons->AsIntConCommon()->IconValue() ? "true" : "false");
+ JITDUMP("Bashed to %s:\n", cons->AsIntCon()->IconValue() ? "true" : "false");
DISPTREE(cons);
return cons;
@@ -14033,7 +14006,7 @@ CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
}
// The handle could be a literal constant
- if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() == TYP_I_IMPL))
+ if (tree->IsCnsIntOrI() && (tree->TypeGet() == TYP_I_IMPL))
{
assert(tree->IsIconHandle(GTF_ICON_CLASS_HDL));
result = (CORINFO_CLASS_HANDLE)tree->AsIntCon()->gtCompileTimeHandle;
@@ -14052,7 +14025,7 @@ CORINFO_CLASS_HANDLE Compiler::gtGetHelperArgClassHandle(GenTree* tree)
{
GenTree* handleTreeInternal = tree->AsOp()->gtOp1;
- if ((handleTreeInternal->OperGet() == GT_CNS_INT) && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
+ if (handleTreeInternal->IsCnsIntOrI() && (handleTreeInternal->TypeGet() == TYP_I_IMPL))
{
// These handle constants should be class handles.
assert(handleTreeInternal->IsIconHandle(GTF_ICON_CLASS_HDL));
@@ -14119,7 +14092,7 @@ GenTree* Compiler::gtFoldExprSpecial(GenTree* tree)
/* Get the constant value */
- val = cons->AsIntConCommon()->IconValue();
+ val = cons->AsIntCon()->IconValue();
// Transforms that would drop op cannot be performed if op has side effects
bool opHasSideEffects = (op->gtFlags & GTF_SIDE_EFFECT) != 0;
@@ -14456,12 +14429,12 @@ GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
GenTree* op;
GenTree* cons;
- if (op1->IsCnsIntOrI())
+ if (op1->IsIntegralConst())
{
op = op2;
cons = op1;
}
- else if (op2->IsCnsIntOrI())
+ else if (op2->IsIntegralConst())
{
op = op1;
cons = op2;
@@ -14471,9 +14444,7 @@ GenTree* Compiler::gtFoldBoxNullable(GenTree* tree)
return tree;
}
- ssize_t const val = cons->AsIntConCommon()->IconValue();
-
- if (val != 0)
+ if (cons->AsIntCon()->IntegralValue() != 0)
{
return tree;
}
@@ -15158,12 +15129,12 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree)
// Fold constant LONG unary operator.
- if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
+ if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
- lval1 = op1->AsIntConCommon()->LngValue();
+ lval1 = op1->AsIntCon()->LngValue();
switch (tree->OperGet())
{
@@ -15445,8 +15416,8 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree)
case TYP_BYREF:
- i1 = op1->AsIntConCommon()->IconValue();
- i2 = op2->AsIntConCommon()->IconValue();
+ i1 = op1->AsIntCon()->IconValue();
+ i2 = op2->AsIntCon()->IconValue();
switch (tree->OperGet())
{
@@ -15494,18 +15465,18 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree)
// No GC pointer types should be folded here...
assert(!varTypeIsGC(op1->TypeGet()) && !varTypeIsGC(op2->TypeGet()));
- if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
+ if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
- if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
+ if (!op2->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
- i1 = op1->AsIntConCommon()->IconValue();
- i2 = op2->AsIntConCommon()->IconValue();
+ i1 = op1->AsIntCon()->IconValue();
+ i2 = op2->AsIntCon()->IconValue();
switch (tree->OperGet())
{
@@ -15694,21 +15665,18 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree)
// it is a TYP_INT.
assert(op2->TypeIs(TYP_LONG, TYP_INT));
- if (!op1->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
+ if (!op1->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
- if (!op2->AsIntConCommon()->ImmedValCanBeFolded(this, tree->OperGet()))
+ if (!op2->AsIntCon()->ImmedValCanBeFolded(this, tree->OperGet()))
{
return tree;
}
- lval1 = op1->AsIntConCommon()->LngValue();
-
- // For the shift operators we can have a op2 that is a TYP_INT.
- // Thus we cannot just use LngValue(), as it will assert on 32 bit if op2 is not GT_CNS_LNG.
- lval2 = op2->AsIntConCommon()->IntegralValue();
+ lval1 = op1->AsIntCon()->LngValue();
+    lval2 = op2->AsIntCon()->IntegralValue(); // For the shift operators we can have an op2 that is a TYP_INT.
switch (tree->OperGet())
{
@@ -15889,17 +15857,11 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree)
JITDUMP("\nFolding long operator with constant nodes into a constant:\n");
DISPTREE(tree);
- assert((GenTree::s_gtNodeSizes[GT_CNS_NATIVELONG] == TREE_NODE_SZ_SMALL) ||
- (tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE));
-
tree->BashToConst(lval1);
#ifdef TARGET_64BIT
tree->AsIntCon()->gtFieldSeq = fieldSeq;
#endif
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(tree);
- }
+ fgUpdateConstTreeValueNumber(tree);
JITDUMP("Bashed to long constant:\n");
DISPTREE(tree);
@@ -16169,7 +16131,7 @@ GenTree* Compiler::gtFoldIndirConst(GenTreeIndir* indir)
GenTree* indexNode = addr->AsIndexAddr()->Index();
if (!stringNode->IsStringEmptyField() && indexNode->IsCnsIntOrI())
{
-            int cnsIndex = static_cast<int>(indexNode->AsIntConCommon()->IconValue());
+            int cnsIndex = static_cast<int>(indexNode->AsIntCon()->IconValue());
if (cnsIndex >= 0)
{
char16_t chr;
@@ -17318,7 +17280,7 @@ Compiler::TypeProducerKind Compiler::gtGetTypeProducerKind(GenTree* tree)
{
return TPK_GetType;
}
- else if ((tree->gtOper == GT_CNS_INT) && (tree->AsIntCon()->gtIconVal == 0))
+ else if ((tree->IsCnsIntOrI()) && (tree->AsIntCon()->IconValue() == 0))
{
return TPK_Null;
}
@@ -17938,7 +17900,7 @@ ssize_t GenTreeIndir::Offset()
}
else if (Addr()->IsCnsIntOrI() && Addr()->isContained())
{
- return Addr()->AsIntConCommon()->IconValue();
+ return Addr()->AsIntCon()->IconValue();
}
else
{
@@ -17953,7 +17915,7 @@ unsigned GenTreeIndir::Size() const
}
//------------------------------------------------------------------------
-// GenTreeIntConCommon::ImmedValNeedsReloc: does this immediate value needs recording a relocation with the VM?
+// GenTreeIntCon::ImmedValNeedsReloc: does this immediate value need a relocation recorded with the VM?
//
// Arguments:
// comp - Compiler instance
@@ -17961,7 +17923,7 @@ unsigned GenTreeIndir::Size() const
// Return Value:
// True if this immediate value requires us to record a relocation for it; false otherwise.
-bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
+bool GenTreeIntCon::ImmedValNeedsReloc(Compiler* comp)
{
return comp->opts.compReloc && IsIconHandle();
}
@@ -17976,7 +17938,7 @@ bool GenTreeIntConCommon::ImmedValNeedsReloc(Compiler* comp)
// Return Value:
// True if this immediate value can be folded for op; false otherwise.
-bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
+bool GenTreeIntCon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
{
// In general, immediate values that need relocations can't be folded.
// There are cases where we do want to allow folding of handle comparisons
@@ -17988,7 +17950,7 @@ bool GenTreeIntConCommon::ImmedValCanBeFolded(Compiler* comp, genTreeOps op)
// Returns true if this absolute address fits within the base of an addr mode.
// On Amd64 this effectively means, whether an absolute indirect address can
// be encoded as 32-bit offset relative to IP or zero.
-bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
+bool GenTreeIntCon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
@@ -18023,12 +17985,12 @@ bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
// offsets. Note that JIT will always attempt to relocate code addresses (.e.g call addr).
// After an overflow, VM will assume any relocation recorded is for a code address and will
// emit jump thunk if it cannot be encoded as pc-relative offset.
- return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsInI32();
+        return (IMAGE_REL_BASED_REL32 == comp->eeGetRelocTypeHint((void*)IconValue())) || FitsIn<int32_t>(IconValue());
}
}
// Returns true if this icon value is encoded as addr needs recording a relocation with VM
-bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
+bool GenTreeIntCon::AddrNeedsReloc(Compiler* comp)
{
if (comp->opts.compReloc)
{
@@ -18045,7 +18007,7 @@ bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
#elif defined(TARGET_X86)
// Returns true if this absolute address fits within the base of an addr mode.
// On x86 all addresses are 4-bytes and can be directly encoded in an addr mode.
-bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
+bool GenTreeIntCon::FitsInAddrBase(Compiler* comp)
{
#ifdef DEBUG
// Early out if PC-rel encoding of absolute addr is disabled.
@@ -18059,7 +18021,7 @@ bool GenTreeIntConCommon::FitsInAddrBase(Compiler* comp)
}
// Returns true if this icon value is encoded as addr needs recording a relocation with VM
-bool GenTreeIntConCommon::AddrNeedsReloc(Compiler* comp)
+bool GenTreeIntCon::AddrNeedsReloc(Compiler* comp)
{
// If generating relocatable code, icons should be reported for recording relocatons.
return comp->opts.compReloc && IsIconHandle();
@@ -18954,9 +18916,9 @@ void GenTreeArrAddr::ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum*
{
case GT_CNS_INT:
assert(!tree->AsIntCon()->ImmedValNeedsReloc(comp));
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had target_ssize_t
// type.
- *pOffset += (inputMul * (target_ssize_t)(tree->AsIntCon()->gtIconVal));
+ *pOffset += (inputMul * (target_ssize_t)(tree->AsIntCon()->IconValue()));
return;
case GT_ADD:
@@ -18978,30 +18940,29 @@ void GenTreeArrAddr::ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum*
{
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
- if (tree->AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
- tree->AsOp()->gtOp2->AsIntCon()->gtFieldSeq == nullptr)
+ if (tree->AsOp()->gtOp2->IsCnsIntOrI() && tree->AsOp()->gtOp2->AsIntCon()->gtFieldSeq == nullptr)
{
assert(!tree->AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had
// target_ssize_t type.
- subMul = (target_ssize_t)tree->AsOp()->gtOp2->AsIntConCommon()->IconValue();
+ subMul = (target_ssize_t)tree->AsOp()->gtOp2->AsIntCon()->IconValue();
nonConst = tree->AsOp()->gtOp1;
}
else
{
assert(!tree->AsOp()->gtOp1->AsIntCon()->ImmedValNeedsReloc(comp));
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had
// target_ssize_t type.
- subMul = (target_ssize_t)tree->AsOp()->gtOp1->AsIntConCommon()->IconValue();
+ subMul = (target_ssize_t)tree->AsOp()->gtOp1->AsIntCon()->IconValue();
nonConst = tree->AsOp()->gtOp2;
}
}
else if (tree->AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!tree->AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had
// target_ssize_t type.
- subMul = (target_ssize_t)tree->AsOp()->gtOp2->AsIntConCommon()->IconValue();
+ subMul = (target_ssize_t)tree->AsOp()->gtOp2->AsIntCon()->IconValue();
nonConst = tree->AsOp()->gtOp1;
}
if (nonConst != nullptr)
@@ -19018,9 +18979,10 @@ void GenTreeArrAddr::ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum*
if (tree->AsOp()->gtOp2->IsCnsIntOrI())
{
assert(!tree->AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had
+ // target_ssize_t
// type.
- target_ssize_t shiftVal = (target_ssize_t)tree->AsOp()->gtOp2->AsIntConCommon()->IconValue();
+ target_ssize_t shiftVal = (target_ssize_t)tree->AsOp()->gtOp2->AsIntCon()->IconValue();
target_ssize_t subMul = target_ssize_t{1} << shiftVal;
ParseArrayAddressWork(tree->AsOp()->gtOp1, comp, inputMul * subMul, pArr, pInxVN, pOffset);
return;
@@ -19587,7 +19549,7 @@ bool GenTree::isRMWHWIntrinsic(Compiler* comp)
return true;
}
-    uint8_t control = static_cast<uint8_t>(op4->AsIntCon()->gtIconVal);
+    uint8_t control = static_cast<uint8_t>(op4->AsIntCon()->IconValue());
const TernaryLogicInfo& info = TernaryLogicInfo::lookup(control);
TernaryLogicUseFlags useFlags = info.GetAllUseFlags();
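
// A sketch (not part of the patch) of how the ternary-logic control byte consumed by
// TernaryLogicInfo::lookup encodes a boolean function (the vpternlog convention): give
// the three operands the truth-table constants A=0xF0, B=0xCC, C=0xAA, evaluate the
// desired expression over those bytes, and the 8-bit result is the control value.
#include <cassert>
#include <cstdint>

int main()
{
    const uint8_t A = 0xF0, B = 0xCC, C = 0xAA;

    // (A & B) | C  ==>  control byte 0xEA
    assert((uint8_t)((A & B) | C) == 0xEA);

    // The "~0xAA" seen later in this patch is simply "NOT C": invert the third operand.
    assert((uint8_t)~C == 0x55);
    return 0;
}
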
@@ -20088,7 +20050,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(
if (op2->IsCnsIntOrI())
{
- op2->AsIntCon()->gtIconVal &= shiftCountMask;
+ op2->AsIntCon()->SetIconValue(op2->AsIntCon()->IconValue() & shiftCountMask);
}
else
{
@@ -20204,7 +20166,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(
if (op2->IsCnsIntOrI())
{
- ssize_t shiftCount = op2->AsIntCon()->gtIconVal;
+ ssize_t shiftCount = op2->AsIntCon()->IconValue();
ssize_t mask = 255 >> shiftCount;
maskAmountOp = gtNewIconNode(mask, type);
@@ -20807,7 +20769,7 @@ GenTree* Compiler::gtNewSimdBinOpNode(
if (op2->IsCnsIntOrI())
{
- op2->AsIntCon()->gtIconVal &= shiftCountMask;
+ op2->AsIntCon()->SetIconValue(op2->AsIntCon()->IconValue() & shiftCountMask);
if ((simdSize == 8) && varTypeIsLong(simdBaseType))
{
@@ -22091,7 +22053,7 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type,
case TYP_BYTE:
case TYP_UBYTE:
{
-                uint8_t cnsVal = static_cast<uint8_t>(op1->AsIntConCommon()->IntegralValue());
+                uint8_t cnsVal = static_cast<uint8_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < simdSize; i++)
{
@@ -22103,7 +22065,7 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type,
case TYP_SHORT:
case TYP_USHORT:
{
-                uint16_t cnsVal = static_cast<uint16_t>(op1->AsIntConCommon()->IntegralValue());
+                uint16_t cnsVal = static_cast<uint16_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < (simdSize / 2); i++)
{
@@ -22115,7 +22077,7 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type,
case TYP_INT:
case TYP_UINT:
{
-                uint32_t cnsVal = static_cast<uint32_t>(op1->AsIntConCommon()->IntegralValue());
+                uint32_t cnsVal = static_cast<uint32_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < (simdSize / 4); i++)
{
@@ -22127,7 +22089,7 @@ GenTree* Compiler::gtNewSimdCreateBroadcastNode(var_types type,
case TYP_LONG:
case TYP_ULONG:
{
-                uint64_t cnsVal = static_cast<uint64_t>(op1->AsIntConCommon()->IntegralValue());
+                uint64_t cnsVal = static_cast<uint64_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < (simdSize / 8); i++)
{
@@ -22228,7 +22190,7 @@ GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type,
case TYP_BYTE:
case TYP_UBYTE:
{
-                uint8_t cnsVal = static_cast<uint8_t>(op1->AsIntConCommon()->IntegralValue());
+                uint8_t cnsVal = static_cast<uint8_t>(op1->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u8[0] = cnsVal;
break;
}
@@ -22236,7 +22198,7 @@ GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type,
case TYP_SHORT:
case TYP_USHORT:
{
-                uint16_t cnsVal = static_cast<uint16_t>(op1->AsIntConCommon()->IntegralValue());
+                uint16_t cnsVal = static_cast<uint16_t>(op1->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u16[0] = cnsVal;
break;
}
@@ -22244,7 +22206,7 @@ GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type,
case TYP_INT:
case TYP_UINT:
{
-                uint32_t cnsVal = static_cast<uint32_t>(op1->AsIntConCommon()->IntegralValue());
+                uint32_t cnsVal = static_cast<uint32_t>(op1->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u32[0] = cnsVal;
break;
}
@@ -22252,7 +22214,7 @@ GenTree* Compiler::gtNewSimdCreateScalarNode(var_types type,
case TYP_LONG:
case TYP_ULONG:
{
-                uint64_t cnsVal = static_cast<uint64_t>(op1->AsIntConCommon()->IntegralValue());
+                uint64_t cnsVal = static_cast<uint64_t>(op1->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u64[0] = cnsVal;
break;
}
@@ -22349,7 +22311,7 @@ GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type,
case TYP_BYTE:
case TYP_UBYTE:
{
-                uint8_t cnsVal = static_cast<uint8_t>(op1->AsIntConCommon()->IntegralValue());
+                uint8_t cnsVal = static_cast<uint8_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < simdSize; i++)
{
@@ -22361,7 +22323,7 @@ GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type,
case TYP_SHORT:
case TYP_USHORT:
{
-                uint16_t cnsVal = static_cast<uint16_t>(op1->AsIntConCommon()->IntegralValue());
+                uint16_t cnsVal = static_cast<uint16_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < (simdSize / 2); i++)
{
@@ -22373,7 +22335,7 @@ GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type,
case TYP_INT:
case TYP_UINT:
{
-                uint32_t cnsVal = static_cast<uint32_t>(op1->AsIntConCommon()->IntegralValue());
+                uint32_t cnsVal = static_cast<uint32_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < (simdSize / 4); i++)
{
@@ -22385,7 +22347,7 @@ GenTree* Compiler::gtNewSimdCreateScalarUnsafeNode(var_types type,
case TYP_LONG:
case TYP_ULONG:
{
-                uint64_t cnsVal = static_cast<uint64_t>(op1->AsIntConCommon()->IntegralValue());
+                uint64_t cnsVal = static_cast<uint64_t>(op1->AsIntCon()->IntegralValue());
for (unsigned i = 0; i < (simdSize / 8); i++)
{
@@ -23859,8 +23821,8 @@ GenTree* Compiler::gtNewSimdShuffleNode(
// this down to basically a broadcast equivalent.
}
- GenTree* retNode = nullptr;
- GenTreeIntConCommon* cnsNode = nullptr;
+ GenTree* retNode = nullptr;
+ GenTreeIntCon* cnsNode = nullptr;
size_t elementSize = genTypeSize(simdBaseType);
size_t elementCount = simdSize / elementSize;
@@ -26945,7 +26907,7 @@ bool GenTree::IsNeverNegative(Compiler* comp) const
if (IsIntegralConst())
{
- return AsIntConCommon()->IntegralValue() >= 0;
+ return AsIntCon()->IntegralValue() >= 0;
}
if (OperIs(GT_LCL_VAR))
@@ -27040,7 +27002,7 @@ bool GenTree::CanDivOrModPossiblyOverflow(Compiler* comp) const
{
return true;
}
- else if (this->TypeIs(TYP_LONG) && (op1->AsIntConCommon()->IntegralValue() == INT64_MIN))
+ else if (this->TypeIs(TYP_LONG) && (op1->AsIntCon()->IntegralValue() == INT64_MIN))
{
return true;
}
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index 914e469f61b2a..7275ba94272cc 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -1113,7 +1113,7 @@ struct GenTree
static bool OperIsConst(genTreeOps gtOper)
{
- static_assert_no_msg(AreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR, GT_CNS_VEC));
+ static_assert_no_msg(AreContiguous(GT_CNS_INT, GT_CNS_DBL, GT_CNS_STR, GT_CNS_VEC));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_VEC);
}
@@ -1197,7 +1197,7 @@ struct GenTree
bool IsConstInitVal() const
{
- return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
+ return IsIntegralConst() || (OperIsInitVal() && gtGetOp1()->IsIntegralConst());
}
bool OperIsBlkOp();
@@ -2208,7 +2208,9 @@ struct GenTree
bool IsIconHandle() const
{
- return (gtOper == GT_CNS_INT) && ((gtFlags & GTF_ICON_HDL_MASK) != 0);
+ bool isIconHandle = IsIntegralConst() && ((gtFlags & GTF_ICON_HDL_MASK) != 0);
+ assert(!isIconHandle || varTypeIsI(this));
+ return isIconHandle;
}
bool IsIconHandle(GenTreeFlags handleType) const
@@ -2216,7 +2218,7 @@ struct GenTree
// check that handleType is one of the valid GTF_ICON_* values
assert((handleType & GTF_ICON_HDL_MASK) != 0);
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
- return (gtOper == GT_CNS_INT) && ((gtFlags & GTF_ICON_HDL_MASK) == handleType);
+ return IsIntegralConst() && ((gtFlags & GTF_ICON_HDL_MASK) == handleType);
}
template <typename... T>
@@ -2229,13 +2231,13 @@ struct GenTree
// For non-icon handle trees, returns GTF_EMPTY.
GenTreeFlags GetIconHandleFlag() const
{
- return (gtOper == GT_CNS_INT) ? (gtFlags & GTF_ICON_HDL_MASK) : GTF_EMPTY;
+ return IsIntegralConst() ? (gtFlags & GTF_ICON_HDL_MASK) : GTF_EMPTY;
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
- assert(gtOper == GT_CNS_INT);
+ assert(IsCnsIntOrI());
gtFlags &= ~GTF_ICON_HDL_MASK;
}
@@ -3048,263 +3050,176 @@ struct GenTreeVal : public GenTree
#endif
};
-struct GenTreeIntConCommon : public GenTree
+//
+// This is the GT_CNS_INT struct definition.
+// It's used to hold both integral constants and pointer handle constants.
+//
+struct GenTreeIntCon : public GenTree
{
- inline INT64 LngValue() const;
- inline void SetLngValue(INT64 val);
- inline ssize_t IconValue() const;
- inline void SetIconValue(ssize_t val);
- inline INT64 IntegralValue() const;
- inline void SetIntegralValue(int64_t value);
+private:
+ int64_t m_value;
-    template <typename T>
- inline void SetValueTruncating(T value);
+public:
+    // When the JIT is prejitting, handles representing various entities at compile time will
+ // not be the same as those representing the same entities at runtime. Since the compiler is
+ // often interested in the compile-time handle, we store it in this field.
+ ssize_t gtCompileTimeHandle = 0;
- GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
- : GenTree(oper, type DEBUGARG(largeNode))
+ // If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
+ // sequence of fields.
+ FieldSeq* gtFieldSeq = nullptr;
+
+#ifdef DEBUG
+ // If the value represents target address (for a field or call), holds the handle of the field (or call).
+ size_t gtTargetHandle = 0;
+#endif
+
+ GenTreeIntCon(var_types type, int64_t value, FieldSeq* fieldSeq DEBUGARG(bool largeNode = false))
+ : GenTree(GT_CNS_INT, type DEBUGARG(largeNode)), m_value(value), gtFieldSeq(fieldSeq)
{
}
- bool FitsInI8() // IconValue() fits into 8-bit signed storage
+#if DEBUGGABLE_GENTREE
+ GenTreeIntCon() : GenTree()
{
- return FitsInI8(IconValue());
}
+#endif
- static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
+ int64_t LngValue() const
{
- return (int8_t)val == val;
+ assert(TypeIs(TYP_LONG));
+ return m_value;
}
- bool FitsInI32() // IconValue() fits into 32-bit signed storage
+ void SetLngValue(INT64 value)
{
- return FitsInI32(IconValue());
+ assert(TypeIs(TYP_LONG));
+ m_value = value;
}
- static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
+ ssize_t IconValue() const
{
-#ifdef TARGET_64BIT
- return (int32_t)val == val;
-#else
- return true;
-#endif
+ assert(IsCnsIntOrI());
+        return static_cast<ssize_t>(m_value);
}
- bool ImmedValNeedsReloc(Compiler* comp);
- bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
-
-#ifdef TARGET_XARCH
- bool FitsInAddrBase(Compiler* comp);
- bool AddrNeedsReloc(Compiler* comp);
-#endif
-
-#if DEBUGGABLE_GENTREE
- GenTreeIntConCommon() : GenTree()
+ void SetIconValue(ssize_t val)
{
+ assert(IsCnsIntOrI());
+ m_value = val;
}
-#endif
-};
-// node representing a read from a physical register
-struct GenTreePhysReg : public GenTree
-{
- // physregs need a field beyond GetRegNum() because
- // GetRegNum() indicates the destination (and can be changed)
- // whereas reg indicates the source
- regNumber gtSrcReg;
- GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
+ int64_t IntegralValue() const
{
+ return m_value;
}
-#if DEBUGGABLE_GENTREE
- GenTreePhysReg() : GenTree()
+
+ void SetIntegralValue(int64_t value)
{
+ assert(FitsIn(TypeGet(), value));
+ m_value = value;
}
-#endif
-};
-
-/* gtIntCon -- integer constant (GT_CNS_INT) */
-struct GenTreeIntCon : public GenTreeIntConCommon
-{
- /*
- * This is the GT_CNS_INT struct definition.
- * It's used to hold for both int constants and pointer handle constants.
- * For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
- * For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
- * In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
- */
- ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
-
- /* The InitializeArray intrinsic needs to go back to the newarray statement
- to find the class handle of the array so that we can get its size. However,
- in ngen mode, the handle in that statement does not correspond to the compile
- time handle (rather it lets you get a handle at run-time). In that case, we also
- need to store a compile time handle, which goes in this gtCompileTimeHandle field.
- */
- ssize_t gtCompileTimeHandle;
-
- // TODO-Cleanup: It's not clear what characterizes the cases where the field
- // above is used. It may be that its uses and those of the "gtFieldSeq" field below
- // are mutually exclusive, and they could be put in a union. Or else we should separate
- // this type into three subtypes.
-
- // If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
- // sequence of fields.
- FieldSeq* gtFieldSeq;
-#ifdef DEBUG
- // If the value represents target address (for a field or call), holds the handle of the field (or call).
- size_t gtTargetHandle = 0;
-#endif
-
- GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
- : GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
- , gtIconVal(value)
- , gtCompileTimeHandle(0)
- , gtFieldSeq(nullptr)
+ uint64_t IntegralValueUnsigned() const
{
+        return Is32BitConst() ? static_cast<uint32_t>(m_value) : m_value;
}
- GenTreeIntCon(var_types type, ssize_t value, FieldSeq* fields DEBUGARG(bool largeNode = false))
- : GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
- , gtIconVal(value)
- , gtCompileTimeHandle(0)
- , gtFieldSeq(fields)
+ void SetIntegralValueUnsigned(uint64_t value)
{
- }
+ if (Is32BitConst())
+ {
+            assert(FitsIn<uint32_t>(value));
+            value = static_cast<int32_t>(value);
+ }
- void FixupInitBlkValue(var_types type);
+ m_value = value;
+ }
-#if DEBUGGABLE_GENTREE
- GenTreeIntCon() : GenTreeIntConCommon()
+ //------------------------------------------------------------------------
+ // SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
+ //
+ // The function will truncate the supplied value to a 32 bit signed
+ // integer if the node's type is not TYP_LONG, otherwise setting it
+ // as-is. Note that this function intentionally does not check for
+ // small types (such nodes are created in lowering) for TP reasons.
+ //
+ // This function is intended to be used where its truncating behavior is
+ // desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
+ // wider integers, which is typical when compiling on 64 bit hosts, as
+ // most arithmetic is done in ssize_t's aka int64_t's in that case, while
+ // the node itself can be of a narrower type.
+ //
+ // Arguments:
+ // value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
+ //
+ // Notes:
+ // This function is templated so that it works well with compiler warnings of
+ // the form "Operation may overflow before being assigned to a wider type", in
+ // case "value" is of type ssize_t, which is common.
+ //
+    template <typename T>
+ void SetValueTruncating(T value)
{
- }
-#endif
-};
+ static_assert_no_msg(
+            (std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, ssize_t>::value));
-/* gtLngCon -- long constant (GT_CNS_LNG) */
+ if (Is32BitConst())
+ {
+            value = static_cast<int32_t>(value);
+ }
-struct GenTreeLngCon : public GenTreeIntConCommon
-{
- INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
- INT32 LoVal()
- {
- return (INT32)(gtLconVal & 0xffffffff);
+ SetIntegralValue(value);
}
- INT32 HiVal()
+ int LoVal() const
{
- return (INT32)(gtLconVal >> 32);
+ assert(TypeIs(TYP_LONG));
+ return (int)(m_value & 0xffffffff);
}
- GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
- {
- SetLngValue(val);
- }
-#if DEBUGGABLE_GENTREE
- GenTreeLngCon() : GenTreeIntConCommon()
+ int HiVal() const
{
+ assert(TypeIs(TYP_LONG));
+ return (int)(m_value >> 32);
}
-#endif
-};
-
-inline INT64 GenTreeIntConCommon::LngValue() const
-{
-#ifndef TARGET_64BIT
- assert(gtOper == GT_CNS_LNG);
- return AsLngCon()->gtLconVal;
-#else
- return IconValue();
-#endif
-}
-inline void GenTreeIntConCommon::SetLngValue(INT64 val)
-{
-#ifndef TARGET_64BIT
- assert(gtOper == GT_CNS_LNG);
- AsLngCon()->gtLconVal = val;
-#else
- // Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
- C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
- C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
+ void FixupInitBlkValue(var_types asgType);
+ bool ImmedValNeedsReloc(Compiler* comp);
+ bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
- SetIconValue(ssize_t(val));
+#ifdef TARGET_XARCH
+ bool FitsInAddrBase(Compiler* comp);
+ bool AddrNeedsReloc(Compiler* comp);
#endif
-}
-
-inline ssize_t GenTreeIntConCommon::IconValue() const
-{
- assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
- return AsIntCon()->gtIconVal;
-}
-
-inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
-{
- assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
- AsIntCon()->gtIconVal = val;
-}
-
-inline INT64 GenTreeIntConCommon::IntegralValue() const
-{
-#ifdef TARGET_64BIT
- return LngValue();
-#else
- return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
-#endif // TARGET_64BIT
-}
-inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
-{
+private:
+ bool Is32BitConst() const
+ {
#ifdef TARGET_64BIT
- SetIconValue(value);
+ return TypeIs(TYP_INT);
#else
- if (OperIs(GT_CNS_LNG))
- {
- SetLngValue(value);
- }
- else
- {
-        assert(FitsIn<int32_t>(value));
-        SetIconValue(static_cast<int32_t>(value));
+ return !TypeIs(TYP_LONG);
+#endif
}
-#endif // TARGET_64BIT
-}
+};
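
// A standalone model (not part of the patch) of the merged node's storage invariant:
// every integral constant now lives in one 64-bit field, with 32-bit constants kept
// sign-extended. That makes IntegralValue() uniform, while the unsigned view has to
// re-narrow 32-bit nodes, as IntegralValueUnsigned() above does. "Model" is a made-up
// stand-in, not the JIT type.
#include <cassert>
#include <cstdint>

struct Model
{
    bool    is32Bit;
    int64_t value; // 32-bit constants stored sign-extended

    uint64_t IntegralValueUnsigned() const
    {
        return is32Bit ? static_cast<uint32_t>(value) : static_cast<uint64_t>(value);
    }
};

int main()
{
    Model intMinusOne{true, -1};
    assert(intMinusOne.value == -1);                            // sign-extended storage
    assert(intMinusOne.IntegralValueUnsigned() == 0xFFFFFFFFu); // 32-bit unsigned view

    Model longMinusOne{false, -1};
    assert(longMinusOne.IntegralValueUnsigned() == ~0ull);      // full 64-bit view
    return 0;
}
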
-//------------------------------------------------------------------------
-// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
-//
-// The function will truncate the supplied value to a 32 bit signed
-// integer if the node's type is not TYP_LONG, otherwise setting it
-// as-is. Note that this function intentionally does not check for
-// small types (such nodes are created in lowering) for TP reasons.
-//
-// This function is intended to be used where its truncating behavior is
-// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
-// wider integers, which is typical when compiling on 64 bit hosts, as
-// most arithmetic is done in ssize_t's aka int64_t's in that case, while
-// the node itself can be of a narrower type.
-//
-// Arguments:
-// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
-//
-// Notes:
-// This function is templated so that it works well with compiler warnings of
-// the form "Operation may overflow before being assigned to a wider type", in
-// case "value" is of type ssize_t, which is common.
-//
-template <typename T>
-inline void GenTreeIntConCommon::SetValueTruncating(T value)
+// node representing a read from a physical register
+struct GenTreePhysReg : public GenTree
{
- static_assert_no_msg(
-        (std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, ssize_t>::value));
-
- if (TypeIs(TYP_LONG))
+ // physregs need a field beyond GetRegNum() because
+ // GetRegNum() indicates the destination (and can be changed)
+ // whereas reg indicates the source
+ regNumber gtSrcReg;
+ GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
- SetLngValue(value);
}
- else
+#if DEBUGGABLE_GENTREE
+ GenTreePhysReg() : GenTree()
{
-        SetIconValue(static_cast<int32_t>(value));
}
-}
+#endif
+};
/* gtDblCon -- double constant (GT_CNS_DBL) */
@@ -6535,7 +6450,7 @@ struct GenTreeVecCon : public GenTree
{
if (arg->IsCnsIntOrI())
{
-                simdVal.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal);
+                simdVal.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->IconValue());
return true;
}
else
@@ -6551,7 +6466,7 @@ struct GenTreeVecCon : public GenTree
{
if (arg->IsCnsIntOrI())
{
-                simdVal.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal);
+                simdVal.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->IconValue());
return true;
}
else
@@ -6567,7 +6482,7 @@ struct GenTreeVecCon : public GenTree
{
if (arg->IsCnsIntOrI())
{
-                simdVal.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal);
+                simdVal.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->IconValue());
return true;
}
else
@@ -6584,18 +6499,18 @@ struct GenTreeVecCon : public GenTree
#if defined(TARGET_64BIT)
if (arg->IsCnsIntOrI())
{
-            simdVal.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal);
+            simdVal.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->IconValue());
return true;
}
#else
if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI())
{
- // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT
+ // 32-bit targets will decompose long constants into two GT_CNS_INT
// We need to reconstruct the 64-bit value in order to handle this
- INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal;
+ INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->IconValue();
gtLconVal <<= 32;
- gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal;
+ gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->IconValue();
simdVal.i64[argIdx] = gtLconVal;
return true;
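
// A sketch (not part of the patch) of the lo/hi recombination above: on 32-bit targets
// a long constant is decomposed into two GT_CNS_INT halves, and rebuilding the 64-bit
// value requires widening the high half before shifting and zero-extending the low half
// so its sign bit does not smear into the upper word. The helper name is made up.
#include <cassert>
#include <cstdint>

static int64_t RecombineLong(int32_t loVal, int32_t hiVal)
{
    int64_t value = static_cast<int64_t>(hiVal) << 32;
    value |= static_cast<uint32_t>(loVal); // zero-extend: loVal may have its top bit set
    return value;
}

int main()
{
    assert(RecombineLong(static_cast<int32_t>(0x89ABCDEF), 0x01234567) == 0x0123456789ABCDEFLL);
    return 0;
}
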
@@ -8877,23 +8792,12 @@ inline bool GenTree::OperIsCopyBlkOp()
// the given value.
//
// Notes:
-// Like gtIconVal, the argument is of ssize_t, so cannot check for
+// Like IconValue(), the argument is of type ssize_t, so this cannot check for
// long constants in a target-independent way.
-
+//
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
-
{
- if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
- {
- return true;
- }
-
- if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
- {
- return true;
- }
-
- return false;
+ return IsIntegralConst() && (AsIntCon()->IntegralValue() == constVal);
}
//-------------------------------------------------------------------
@@ -9720,16 +9624,16 @@ inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
inline bool GenTree::IsCnsIntOrI() const
{
- return (gtOper == GT_CNS_INT);
+#ifdef TARGET_64BIT
+ return IsIntegralConst();
+#else // !TARGET_64BIT
+ return IsIntegralConst() && !TypeIs(TYP_LONG);
+#endif // !TARGET_64BIT
}
inline bool GenTree::IsIntegralConst() const
{
-#ifdef TARGET_64BIT
- return IsCnsIntOrI();
-#else // !TARGET_64BIT
- return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
-#endif // !TARGET_64BIT
+ return OperIs(GT_CNS_INT);
}
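
// A sketch (not part of the patch) of the predicate split above. With GT_CNS_LNG gone,
// every integral constant is a GT_CNS_INT, so IsIntegralConst() is a plain oper check;
// IsCnsIntOrI() keeps its old meaning of "constant that fits the target's native int",
// which only differs on 32-bit targets, where a TYP_LONG constant is integral but not
// native-sized. NodeType and the free functions below are made-up stand-ins.
#include <cassert>

enum class NodeType { Int, Long };

static bool IsIntegralConst(NodeType) { return true; } // all integral constants are GT_CNS_INT now

static bool IsCnsIntOrI(NodeType type, bool target64Bit)
{
    return target64Bit ? IsIntegralConst(type) : (type != NodeType::Long);
}

int main()
{
    assert(IsCnsIntOrI(NodeType::Long, /* target64Bit */ true));   // TYP_LONG is native on 64-bit
    assert(!IsCnsIntOrI(NodeType::Long, /* target64Bit */ false)); // but not on 32-bit
    assert(IsCnsIntOrI(NodeType::Int, /* target64Bit */ false));
    return 0;
}
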
//-------------------------------------------------------------------------
@@ -9744,7 +9648,7 @@ inline bool GenTree::IsIntegralConstPow2() const
{
if (IsIntegralConst())
{
- return isPow2(AsIntConCommon()->IntegralValue());
+ return isPow2(AsIntCon()->IntegralValue());
}
return false;
@@ -9767,7 +9671,7 @@ inline bool GenTree::IsIntegralConstUnsignedPow2() const
{
if (IsIntegralConst())
{
- return isPow2((UINT64)AsIntConCommon()->IntegralValue());
+ return isPow2((UINT64)AsIntCon()->IntegralValue());
}
return false;
@@ -9785,7 +9689,7 @@ inline bool GenTree::IsIntegralConstAbsPow2() const
{
if (IsIntegralConst())
{
- INT64 svalue = AsIntConCommon()->IntegralValue();
+ INT64 svalue = AsIntCon()->IntegralValue();
size_t value = (svalue == SSIZE_T_MIN) ? static_cast<size_t>(svalue) : static_cast<size_t>(abs(svalue));
return isPow2(value);
}
@@ -9797,7 +9701,7 @@ inline bool GenTree::IsIntegralConstAbsPow2() const
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
- return IsCnsIntOrI() && AsIntCon()->FitsInI32();
+    return IsCnsIntOrI() && FitsIn<int32_t>(AsIntCon()->IconValue());
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
diff --git a/src/coreclr/jit/gentreeopsdef.h b/src/coreclr/jit/gentreeopsdef.h
index acffd4a12045d..abb213680d7dc 100644
--- a/src/coreclr/jit/gentreeopsdef.h
+++ b/src/coreclr/jit/gentreeopsdef.h
@@ -12,16 +12,6 @@ enum genTreeOps : BYTE
#include "gtlist.h"
GT_COUNT,
-
-#ifdef TARGET_64BIT
- // GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
- // For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
- GT_CNS_NATIVELONG = GT_CNS_INT,
-#else
- // For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
- // In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
- GT_CNS_NATIVELONG = GT_CNS_LNG,
-#endif
};
/*****************************************************************************/
diff --git a/src/coreclr/jit/gtlist.h b/src/coreclr/jit/gtlist.h
index e88c447d3774d..5200a54365832 100644
--- a/src/coreclr/jit/gtlist.h
+++ b/src/coreclr/jit/gtlist.h
@@ -43,7 +43,6 @@ GTNODE(RET_EXPR , GenTreeRetExpr ,0,0,GTK_LEAF|DBK_NOTLIR) // Place
//-----------------------------------------------------------------------------
GTNODE(CNS_INT , GenTreeIntCon ,0,0,GTK_LEAF)
-GTNODE(CNS_LNG , GenTreeLngCon ,0,0,GTK_LEAF)
GTNODE(CNS_DBL , GenTreeDblCon ,0,0,GTK_LEAF)
GTNODE(CNS_STR , GenTreeStrCon ,0,0,GTK_LEAF)
GTNODE(CNS_VEC , GenTreeVecCon ,0,0,GTK_LEAF)
diff --git a/src/coreclr/jit/gtstructs.h b/src/coreclr/jit/gtstructs.h
index c53eff1bbf36b..415d87cdedc89 100644
--- a/src/coreclr/jit/gtstructs.h
+++ b/src/coreclr/jit/gtstructs.h
@@ -44,7 +44,7 @@
//
// The "SPECIAL" variants indicate that some or all of the allowed opers exist elsewhere. This is
// used in the DEBUGGABLE_GENTREE implementation when determining which vtable pointer to use for
-// a given oper. For example, IntConCommon (for the GenTreeIntConCommon type) allows opers
+// a given oper. For example, IntConCommon (for the GenTreeIntCon type) allows opers
// for all its subtypes. The "SPECIAL" version is attached to the supertypes. "N" is always
// considered "special".
@@ -55,9 +55,7 @@ GTSTRUCT_2(Val , GT_END_LFIN, GT_JMP)
#else
GTSTRUCT_1(Val , GT_JMP)
#endif
-GTSTRUCT_2_SPECIAL(IntConCommon, GT_CNS_INT, GT_CNS_LNG)
GTSTRUCT_1(IntCon , GT_CNS_INT)
-GTSTRUCT_1(LngCon , GT_CNS_LNG)
GTSTRUCT_1(DblCon , GT_CNS_DBL)
GTSTRUCT_1(StrCon , GT_CNS_STR)
GTSTRUCT_1(VecCon , GT_CNS_VEC)
diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp
index 01f64f47ce3cc..321f91950361b 100644
--- a/src/coreclr/jit/hwintrinsicarm64.cpp
+++ b/src/coreclr/jit/hwintrinsicarm64.cpp
@@ -714,7 +714,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
-                        cnsVal = static_cast<uint8_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+                        cnsVal = static_cast<uint8_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u8[simdLength - 1 - index] = cnsVal;
}
break;
@@ -727,7 +727,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
-                        cnsVal = static_cast<uint16_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+                        cnsVal = static_cast<uint16_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u16[simdLength - 1 - index] = cnsVal;
}
break;
@@ -740,7 +740,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
-                        cnsVal = static_cast<uint32_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+                        cnsVal = static_cast<uint32_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u32[simdLength - 1 - index] = cnsVal;
}
break;
@@ -753,7 +753,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
-                        cnsVal = static_cast<uint64_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+                        cnsVal = static_cast<uint64_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u64[simdLength - 1 - index] = cnsVal;
}
break;
diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
index bcdf6ba8bb1b9..c708bb284f446 100644
--- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
+++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
@@ -638,7 +638,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
if (intrin.op3->isContainedFltOrDblImmed())
{
assert(intrin.op2->isContainedIntOrIImmed());
- assert(intrin.op2->AsIntCon()->gtIconVal == 0);
+ assert(intrin.op2->AsIntCon()->IconValue() == 0);
const double dataValue = intrin.op3->AsDblCon()->DconValue();
GetEmitter()->emitIns_R_F(INS_fmov, emitSize, targetReg, dataValue, opt);
@@ -701,8 +701,8 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
GetEmitter()->emitIns_Mov(INS_mov, emitTypeSize(node), targetReg, op1Reg, /* canSkip */ true);
}
- const int resultIndex = (int)intrin.op2->AsIntCon()->gtIconVal;
- const int valueIndex = (int)intrin.op4->AsIntCon()->gtIconVal;
+ const int resultIndex = (int)intrin.op2->AsIntCon()->IconValue();
+ const int valueIndex = (int)intrin.op4->AsIntCon()->IconValue();
GetEmitter()->emitIns_R_R_I_I(ins, emitSize, targetReg, op3Reg, resultIndex, valueIndex, opt);
}
break;
@@ -859,7 +859,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
if (intrin.op1->isContainedIntOrIImmed())
{
// movi/movni reg, #imm8
- const ssize_t dataValue = intrin.op1->AsIntCon()->gtIconVal;
+ const ssize_t dataValue = intrin.op1->AsIntCon()->IconValue();
GetEmitter()->emitIns_R_I(INS_movi, emitSize, targetReg, dataValue, opt);
}
else
@@ -924,7 +924,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
}
else if (intrin.op1->isContainedIntOrIImmed())
{
- const ssize_t dataValue = intrin.op1->AsIntCon()->gtIconVal;
+ const ssize_t dataValue = intrin.op1->AsIntCon()->IconValue();
GetEmitter()->emitIns_R_I(INS_movi, emitSize, targetReg, dataValue, opt);
}
else if (GetEmitter()->IsMovInstruction(ins))
diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp
index e2cb1ceaead47..27833bfc0f5b4 100644
--- a/src/coreclr/jit/hwintrinsicxarch.cpp
+++ b/src/coreclr/jit/hwintrinsicxarch.cpp
@@ -1549,7 +1549,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
- cnsVal = static_cast<uint8_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+ cnsVal = static_cast<uint8_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u8[simdLength - 1 - index] = cnsVal;
}
break;
@@ -1562,7 +1562,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
- cnsVal = static_cast<uint16_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+ cnsVal = static_cast<uint16_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u16[simdLength - 1 - index] = cnsVal;
}
break;
@@ -1575,7 +1575,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
- cnsVal = static_cast<uint32_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+ cnsVal = static_cast<uint32_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u32[simdLength - 1 - index] = cnsVal;
}
break;
@@ -1588,7 +1588,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
for (uint32_t index = 0; index < sig->numArgs; index++)
{
- cnsVal = static_cast<uint64_t>(impPopStack().val->AsIntConCommon()->IntegralValue());
+ cnsVal = static_cast<uint64_t>(impPopStack().val->AsIntCon()->IntegralValue());
vecCon->gtSimdVal.u64[simdLength - 1 - index] = cnsVal;
}
break;
@@ -3393,7 +3393,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
if (op4->IsIntegralConst())
{
- uint8_t control = static_cast<uint8_t>(op4->AsIntCon()->gtIconVal);
+ uint8_t control = static_cast<uint8_t>(op4->AsIntCon()->IconValue());
const TernaryLogicInfo& info = TernaryLogicInfo::lookup(control);
TernaryLogicUseFlags useFlags = info.GetAllUseFlags();
@@ -3702,7 +3702,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
*val2 = gtNewZeroConNode(retType);
}
- op4->AsIntCon()->gtIconVal = static_cast<uint8_t>(~0xAA);
+ op4->AsIntCon()->SetIconValue(static_cast<uint8_t>(~0xAA));
break;
}
@@ -3760,7 +3760,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
*val1 = gtNewZeroConNode(retType);
}
- op4->AsIntCon()->gtIconVal = static_cast<uint8_t>(~0xCC | 0xAA);
+ op4->AsIntCon()->SetIconValue(static_cast<uint8_t>(~0xCC | 0xAA));
}
break;
}
@@ -3803,7 +3803,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
*val1 = gtNewZeroConNode(retType);
}
- op4->AsIntCon()->gtIconVal = static_cast<uint8_t>(~(0xCC & 0xAA));
+ op4->AsIntCon()->SetIconValue(static_cast<uint8_t>(~(0xCC & 0xAA)));
break;
}
@@ -3845,7 +3845,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
*val1 = gtNewZeroConNode(retType);
}
- op4->AsIntCon()->gtIconVal = static_cast<uint8_t>(~(0xCC | 0xAA));
+ op4->AsIntCon()->SetIconValue(static_cast<uint8_t>(~(0xCC | 0xAA)));
break;
}
@@ -3887,7 +3887,7 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic,
*val1 = gtNewZeroConNode(retType);
}
- op4->AsIntCon()->gtIconVal = static_cast<uint8_t>(~(0xCC ^ 0xAA));
+ op4->AsIntCon()->SetIconValue(static_cast<uint8_t>(~(0xCC ^ 0xAA)));
break;
}
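
The ~0xAA, ~0xCC | 0xAA, ~(0xCC & 0xAA), ~(0xCC | 0xAA), and ~(0xCC ^ 0xAA) immediates above follow the usual vpternlog convention: the three operands contribute fixed truth tables A = 0xF0, B = 0xCC, C = 0xAA, so the control byte for a boolean expression is that expression applied to the tables. A quick standalone sanity check of the values being stored:

    #include <cstdint>
    #include <cstdio>
    int main()
    {
        const uint8_t B = 0xCC, C = 0xAA; // second and third operand truth tables
        printf("not(C)     = 0x%02X\n", static_cast<uint8_t>(~C));       // 0x55
        printf("not(B) | C = 0x%02X\n", static_cast<uint8_t>(~B | C));   // 0xBB
        printf("nand(B, C) = 0x%02X\n", static_cast<uint8_t>(~(B & C))); // 0x77
        printf("nor(B, C)  = 0x%02X\n", static_cast<uint8_t>(~(B | C))); // 0x11
        printf("xnor(B, C) = 0x%02X\n", static_cast<uint8_t>(~(B ^ C))); // 0x99
        return 0;
    }
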
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 0b24cbda69bba..e664ec46e8f5a 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -229,7 +229,6 @@ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
switch (tree->gtOper)
{
case GT_CNS_INT:
- case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_CNS_VEC:
@@ -6909,7 +6908,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
assertImp(op1->TypeIs(TYP_REF));
// Check for null pointer - in the inliner case we simply abort.
- if (compIsForInlining() && op1->IsCnsIntOrI())
+ if (compIsForInlining() && op1->IsIntegralConst())
{
compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
return;
@@ -7159,7 +7158,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
/* Special case: "int+0", "int-0", "int*1", "int/1" */
- if (op2->gtOper == GT_CNS_INT)
+ if (op2->IsCnsIntOrI())
{
if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
(op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
@@ -7376,13 +7375,13 @@ void Compiler::impImportBlockCode(BasicBlock* block)
/* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
/* Don't make any blocks unreachable in import only mode */
- if (op1->gtOper == GT_CNS_INT)
+ if (op1->IsCnsIntOrI())
{
/* gtFoldExpr() should prevent this as we don't want to make any blocks
unreachable under compDbgCode */
assert(!opts.compDbgCode);
- BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
+ BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->IconValue() ? BBJ_ALWAYS : BBJ_NONE);
// BBJ_COND: normal case
// foldedJumpKind: this can happen if we are reimporting the block for the second time
assertImp(block->KindIs(BBJ_COND, foldedJumpKind)); // normal case
@@ -7638,10 +7637,10 @@ void Compiler::impImportBlockCode(BasicBlock* block)
assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
// Fold Switch for GT_CNS_INT
- if (opts.OptimizationEnabled() && (op1->gtOper == GT_CNS_INT))
+ if (opts.OptimizationEnabled() && op1->IsCnsIntOrI())
{
// Find the jump target
- size_t switchVal = (size_t)op1->AsIntCon()->gtIconVal;
+ size_t switchVal = (size_t)op1->AsIntCon()->IconValue();
unsigned jumpCnt = block->GetJumpSwt()->bbsCount;
BasicBlock** jumpTab = block->GetJumpSwt()->bbsDstTab;
bool foundVal = false;
@@ -7880,9 +7879,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
{
op2 = op1->AsOp()->gtOp2;
- if (op2->gtOper == GT_CNS_INT)
+ if (op2->IsCnsIntOrI())
{
- ssize_t ival = op2->AsIntCon()->gtIconVal;
+ ssize_t ival = op2->AsIntCon()->IconValue();
ssize_t mask, umask;
switch (lclTyp)
@@ -10271,7 +10270,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
break;
}
- ClassLayout* layout = typGetBlkLayout(static_cast<unsigned>(op3->AsIntConCommon()->IconValue()));
+ ClassLayout* layout = typGetBlkLayout(static_cast<unsigned>(op3->AsIntCon()->IconValue()));
if (opcode == CEE_INITBLK)
{
@@ -12737,7 +12736,7 @@ void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
if (impIsInvariant(curArgVal))
{
inlCurArgInfo->argIsInvariant = true;
- if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0))
+ if (inlCurArgInfo->argIsThis && curArgVal->IsCnsIntOrI() && (curArgVal->AsIntCon()->IconValue() == 0))
{
// Abort inlining at this call site
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
@@ -13643,10 +13642,10 @@ bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array)
}
// Check for assignment of NULL.
- if (value->OperIs(GT_CNS_INT))
+ if (value->IsCnsIntOrI())
{
assert(value->gtType == TYP_REF);
- if (value->AsIntCon()->gtIconVal == 0)
+ if (value->AsIntCon()->IconValue() == 0)
{
JITDUMP("\nstelem of null: skipping covariant store check\n");
return true;
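
The importer changes are mechanical, but each substitution has a direction: gtOper/OperIs(GT_CNS_INT) checks become IsCnsIntOrI() where only int- or pointer-sized constants can appear, and IsCnsIntOrI() widens to IsIntegralConst() where a TYP_LONG constant (now also a GT_CNS_INT on 32-bit targets) must be accepted. A sketch of the tightening available for value tests like the stelem null check above:

    static bool IsConstNull(GenTree* value)
    {
        // Oper-based form, removed throughout this diff:
        //   value->OperIs(GT_CNS_INT) && (value->AsIntCon()->gtIconVal == 0)
        // Accessor-based form used by the new code:
        //   value->IsCnsIntOrI() && (value->AsIntCon()->IconValue() == 0)
        // Single-predicate form where any integral zero is acceptable:
        return value->IsIntegralConst(0);
    }
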
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index 6006a48a86f2a..63367287ddecb 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -698,7 +698,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
{
cookieConst = cookie->AsOp()->gtOp1;
}
- assert(cookieConst->gtOper == GT_CNS_INT);
+ assert(cookieConst->IsCnsIntOrI());
// Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
// we won't allow this tree to participate in any CSE logic
@@ -1474,18 +1474,18 @@ var_types Compiler::impImportJitTestLabelMark(int numArgs)
StackEntry se = impPopStack();
GenTree* val = se.val;
assert(val->IsCnsIntOrI());
- tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
+ tlAndN.m_tl = (TestLabel)val->AsIntCon()->IconValue();
}
else if (numArgs == 3)
{
StackEntry se = impPopStack();
GenTree* val = se.val;
assert(val->IsCnsIntOrI());
- tlAndN.m_num = val->AsIntConCommon()->IconValue();
+ tlAndN.m_num = val->AsIntCon()->IconValue();
se = impPopStack();
val = se.val;
assert(val->IsCnsIntOrI());
- tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
+ tlAndN.m_tl = (TestLabel)val->AsIntCon()->IconValue();
}
else
{
@@ -1809,7 +1809,7 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
}
// Check for constant
- if (fieldTokenNode->gtOper != GT_CNS_INT)
+ if (!fieldTokenNode->IsCnsIntOrI())
{
return nullptr;
}
@@ -2043,12 +2043,12 @@ GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
//
// This optimization is only valid for a constant array size.
//
- if (arrayLengthNode->gtOper != GT_CNS_INT)
+ if (!arrayLengthNode->IsCnsIntOrI())
{
return nullptr;
}
- numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal);
+ numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->IconValue());
if (!info.compCompHnd->isSDArray(arrayClsHnd))
{
@@ -2151,7 +2151,7 @@ GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig)
}
// Check for constant
- if (fieldTokenNode->gtOper != GT_CNS_INT)
+ if (!fieldTokenNode->IsCnsIntOrI())
{
return nullptr;
}
@@ -3643,7 +3643,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
{
// `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument
// is `int` sized.
- INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue();
+ INT64 dimValue = gtDim->AsIntCon()->IntegralValue();
assert((unsigned int)dimValue == dimValue);
unsigned dim = (unsigned int)dimValue;
if (dim < rank)
@@ -3808,7 +3808,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
if (op1->IsIntegralConst())
{
- float f32Cns = BitOperations::UInt32BitsToSingle((uint32_t)op1->AsIntConCommon()->IconValue());
+ float f32Cns = BitOperations::UInt32BitsToSingle((uint32_t)op1->AsIntCon()->IconValue());
retNode = gtNewDconNodeF(f32Cns);
}
else
@@ -3827,7 +3827,7 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
{
impPopStack();
- int64_t i64Cns = op1->AsIntConCommon()->LngValue();
+ int64_t i64Cns = op1->AsIntCon()->LngValue();
retNode = gtNewDconNodeD(*reinterpret_cast<double*>(&i64Cns));
}
#ifdef TARGET_64BIT
@@ -4156,14 +4156,14 @@ GenTree* Compiler::impSRCSUnsafeIntrinsic(NamedIntrinsic intrinsic,
{
if (toType == TYP_DOUBLE)
{
- uint64_t u64Cns = static_cast<uint64_t>(op1->AsIntConCommon()->LngValue());
+ uint64_t u64Cns = static_cast<uint64_t>(op1->AsIntCon()->LngValue());
return gtNewDconNodeD(BitOperations::UInt64BitsToDouble(u64Cns));
}
else
{
assert(toType == TYP_FLOAT);
- uint32_t u32Cns = static_cast<uint32_t>(op1->AsIntConCommon()->IconValue());
+ uint32_t u32Cns = static_cast<uint32_t>(op1->AsIntCon()->IconValue());
return gtNewDconNodeF(BitOperations::UInt32BitsToSingle(u32Cns));
}
}
@@ -4606,12 +4606,12 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
if (varTypeIsLong(baseType))
{
- uint64_t cns = static_cast<uint64_t>(op1->AsIntConCommon()->LngValue());
+ uint64_t cns = static_cast<uint64_t>(op1->AsIntCon()->LngValue());
result = gtNewLconNode(BitOperations::LeadingZeroCount(cns));
}
else
{
- uint32_t cns = static_cast<uint32_t>(op1->AsIntConCommon()->IconValue());
+ uint32_t cns = static_cast<uint32_t>(op1->AsIntCon()->IconValue());
result = gtNewIconNode(BitOperations::LeadingZeroCount(cns), baseType);
}
break;
@@ -4708,7 +4708,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
if (varTypeIsLong(baseType))
{
- uint64_t cns = static_cast<uint64_t>(op1->AsIntConCommon()->LngValue());
+ uint64_t cns = static_cast<uint64_t>(op1->AsIntCon()->LngValue());
if (varTypeIsUnsigned(JitType2PreciseVarType(baseJitType)) || (static_cast<int64_t>(cns) >= 0))
{
@@ -4717,7 +4717,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
}
else
{
- uint32_t cns = static_cast<uint32_t>(op1->AsIntConCommon()->IconValue());
+ uint32_t cns = static_cast<uint32_t>(op1->AsIntCon()->IconValue());
if (varTypeIsUnsigned(JitType2PreciseVarType(baseJitType)) || (static_cast<int32_t>(cns) >= 0))
{
@@ -4782,12 +4782,12 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
if (varTypeIsLong(baseType))
{
- uint64_t cns = static_cast<uint64_t>(op1->AsIntConCommon()->LngValue());
+ uint64_t cns = static_cast<uint64_t>(op1->AsIntCon()->LngValue());
result = gtNewLconNode(BitOperations::PopCount(cns));
}
else
{
- uint32_t cns = static_cast<uint32_t>(op1->AsIntConCommon()->IconValue());
+ uint32_t cns = static_cast<uint32_t>(op1->AsIntCon()->IconValue());
result = gtNewIconNode(BitOperations::PopCount(cns), baseType);
}
break;
@@ -4842,7 +4842,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
impPopStack();
GenTree* op1 = impPopStack().val;
- uint32_t cns2 = static_cast<uint32_t>(op2->AsIntConCommon()->IconValue());
+ uint32_t cns2 = static_cast<uint32_t>(op2->AsIntCon()->IconValue());
// Mask the offset to ensure deterministic xplat behavior for overshifting
cns2 &= varTypeIsLong(baseType) ? 0x3F : 0x1F;
@@ -4857,18 +4857,18 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
{
if (varTypeIsLong(baseType))
{
- uint64_t cns1 = static_cast<uint64_t>(op1->AsIntConCommon()->LngValue());
+ uint64_t cns1 = static_cast<uint64_t>(op1->AsIntCon()->LngValue());
result = gtNewLconNode(BitOperations::RotateLeft(cns1, cns2));
}
else
{
- uint32_t cns1 = static_cast<uint32_t>(op1->AsIntConCommon()->IconValue());
+ uint32_t cns1 = static_cast<uint32_t>(op1->AsIntCon()->IconValue());
result = gtNewIconNode(BitOperations::RotateLeft(cns1, cns2), baseType);
}
break;
}
- op2->AsIntConCommon()->SetIconValue(cns2);
+ op2->AsIntCon()->SetIconValue(cns2);
result = gtFoldExpr(gtNewOperNode(GT_ROL, baseType, op1, op2));
break;
@@ -4891,7 +4891,7 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
impPopStack();
GenTree* op1 = impPopStack().val;
- uint32_t cns2 = static_cast<uint32_t>(op2->AsIntConCommon()->IconValue());
+ uint32_t cns2 = static_cast<uint32_t>(op2->AsIntCon()->IconValue());
// Mask the offset to ensure deterministic xplat behavior for overshifting
cns2 &= varTypeIsLong(baseType) ? 0x3F : 0x1F;
@@ -4906,18 +4906,18 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
{
if (varTypeIsLong(baseType))
{
- uint64_t cns1 = static_cast<uint64_t>(op1->AsIntConCommon()->LngValue());
+ uint64_t cns1 = static_cast<uint64_t>(op1->AsIntCon()->LngValue());
result = gtNewLconNode(BitOperations::RotateRight(cns1, cns2));
}
else
{
- uint32_t cns1 = static_cast<uint32_t>(op1->AsIntConCommon()->IconValue());
+ uint32_t cns1 = static_cast<uint32_t>(op1->AsIntCon()->IconValue());
result = gtNewIconNode(BitOperations::RotateRight(cns1, cns2), baseType);
}
break;
}
- op2->AsIntConCommon()->SetIconValue(cns2);
+ op2->AsIntCon()->SetIconValue(cns2);
result = gtFoldExpr(gtNewOperNode(GT_ROR, baseType, op1, op2));
break;
@@ -4937,12 +4937,12 @@ GenTree* Compiler::impPrimitiveNamedIntrinsic(NamedIntrinsic intrinsic,
if (varTypeIsLong(baseType))
{
- uint64_t cns = static_cast<uint64_t>(op1->AsIntConCommon()->LngValue());
+ uint64_t cns = static_cast<uint64_t>(op1->AsIntCon()->LngValue());
result = gtNewLconNode(BitOperations::TrailingZeroCount(cns));
}
else
{
- uint32_t cns = static_cast<uint32_t>(op1->AsIntConCommon()->IconValue());
+ uint32_t cns = static_cast<uint32_t>(op1->AsIntCon()->IconValue());
result = gtNewIconNode(BitOperations::TrailingZeroCount(cns), baseType);
}
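
In the rotate folds above, the count is masked before anything else (0x1F for 32-bit operands, 0x3F for 64-bit), which is what makes overshifting deterministic across platforms; only then is the constant folded or written back for a GT_ROL/GT_ROR node. The arithmetic, checked standalone:

    #include <cstdint>
    #include <cstdio>
    int main()
    {
        const uint32_t value = 0x80000001u;
        const uint32_t count = 33 & 0x1F; // masked as above; a count of 33 behaves like 1
        uint32_t rotated = (value << count) | (value >> ((32 - count) & 31));
        printf("0x%08X\n", rotated); // 0x00000003
        return 0;
    }
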
diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp
index f820518d141ec..274ce66d2e978 100644
--- a/src/coreclr/jit/indirectcalltransformer.cpp
+++ b/src/coreclr/jit/indirectcalltransformer.cpp
@@ -366,10 +366,10 @@ class IndirectCallTransformer
assert(checkIdx == 0);
checkBlock = CreateAndInsertBasicBlock(BBJ_NONE, currBlock);
- GenTree* fatPointerMask = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, FAT_POINTER_MASK);
+ GenTree* fatPointerMask = compiler->gtNewIconNode(FAT_POINTER_MASK, TYP_I_IMPL);
GenTree* fptrAddressCopy = compiler->gtCloneExpr(fptrAddress);
GenTree* fatPointerAnd = compiler->gtNewOperNode(GT_AND, TYP_I_IMPL, fptrAddressCopy, fatPointerMask);
- GenTree* zero = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
+ GenTree* zero = compiler->gtNewIconNode(0, TYP_I_IMPL);
GenTree* fatPointerCmp = compiler->gtNewOperNode(GT_NE, TYP_INT, fatPointerAnd, zero);
GenTree* jmpTree = compiler->gtNewOperNode(GT_JTRUE, TYP_VOID, fatPointerCmp);
Statement* jmpStmt = compiler->fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
@@ -411,7 +411,7 @@ class IndirectCallTransformer
GenTree* GetFixedFptrAddress()
{
GenTree* fptrAddressCopy = compiler->gtCloneExpr(fptrAddress);
- GenTree* fatPointerMask = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, FAT_POINTER_MASK);
+ GenTree* fatPointerMask = compiler->gtNewIconNode(FAT_POINTER_MASK, TYP_I_IMPL);
return compiler->gtNewOperNode(GT_SUB, pointerType, fptrAddressCopy, fatPointerMask);
}
@@ -426,7 +426,7 @@ class IndirectCallTransformer
GenTree* GetHiddenArgument(GenTree* fixedFptrAddress)
{
GenTree* fixedFptrAddressCopy = compiler->gtCloneExpr(fixedFptrAddress);
- GenTree* wordSize = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, genTypeSize(TYP_I_IMPL));
+ GenTree* wordSize = compiler->gtNewIconNode(genTypeSize(TYP_I_IMPL), TYP_I_IMPL);
GenTree* hiddenArgumentPtr = compiler->gtNewOperNode(GT_ADD, pointerType, fixedFptrAddressCopy, wordSize);
return compiler->gtNewIndir(fixedFptrAddressCopy->TypeGet(), hiddenArgumentPtr);
}
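
The three replacements in this file trade placement-new construction of GenTreeIntCon for the gtNewIconNode factory; the node produced is the same, the helper just centralizes initialization. The shape of the change (compiler pointer assumed in scope):

    // Before: oper and type spelled out at every site.
    GenTree* mask = new (compiler, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, FAT_POINTER_MASK);
    // After: the factory used elsewhere in the JIT.
    GenTree* mask2 = compiler->gtNewIconNode(FAT_POINTER_MASK, TYP_I_IMPL);
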
diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp
index 3c914b2875f68..251884dcef4bb 100644
--- a/src/coreclr/jit/liveness.cpp
+++ b/src/coreclr/jit/liveness.cpp
@@ -1992,7 +1992,6 @@ void Compiler::fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALAR
case GT_LABEL:
case GT_FTN_ADDR:
case GT_CNS_INT:
- case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_CNS_VEC:
diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp
index 2cdf1906295c0..edbd8a5365111 100644
--- a/src/coreclr/jit/loopcloning.cpp
+++ b/src/coreclr/jit/loopcloning.cpp
@@ -2751,7 +2751,7 @@ Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloning(GenTree* tree, Loop
// Update the loop context.
//
assert(relopOp2->IsIconHandle(GTF_ICON_CLASS_HDL));
- CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)relopOp2->AsIntConCommon()->IconValue();
+ CORINFO_CLASS_HANDLE clsHnd = (CORINFO_CLASS_HANDLE)relopOp2->AsIntCon()->IconValue();
assert(compCurBB->lastStmt() == info->stmt);
info->context->EnsureLoopOptInfo(info->loopNum)
@@ -2781,7 +2781,7 @@ Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloning(GenTree* tree, Loop
return WALK_CONTINUE;
}
- offset = indirAddr->gtGetOp2()->AsIntConCommon()->IconValue();
+ offset = indirAddr->gtGetOp2()->AsIntCon()->IconValue();
indirAddr = indirAddr->gtGetOp1();
}
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 35677c2bad83f..a34ff979e1b7e 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -739,7 +739,7 @@ GenTree* Lowering::LowerArrLength(GenTreeArrCommon* node)
GenTree* addr;
noway_assert(arr->gtNext == node);
- if ((arr->gtOper == GT_CNS_INT) && (arr->AsIntCon()->gtIconVal == 0))
+ if (arr->IsIntegralConst(0))
{
// If the array is NULL, then we should get a NULL reference
// exception when computing its length. We need to maintain
@@ -2911,26 +2911,26 @@ GenTree* Lowering::LowerTailCallViaJitHelper(GenTreeCall* call, GenTree* callTar
argEntry = call->gtArgs.GetArgByIndex(numArgs - 2);
assert(argEntry != nullptr);
GenTree* arg1 = argEntry->GetEarlyNode()->AsPutArgStk()->gtGetOp1();
- assert(arg1->gtOper == GT_CNS_INT);
+ assert(arg1->IsCnsIntOrI());
ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX
(call->IsVirtualStub() ? 0x2 : 0x0); // Stub dispatch flag
- arg1->AsIntCon()->gtIconVal = tailCallHelperFlags;
+ arg1->AsIntCon()->SetIconValue(tailCallHelperFlags);
// arg 2 == numberOfNewStackArgsWords
argEntry = call->gtArgs.GetArgByIndex(numArgs - 3);
assert(argEntry != nullptr);
GenTree* arg2 = argEntry->GetEarlyNode()->AsPutArgStk()->gtGetOp1();
- assert(arg2->gtOper == GT_CNS_INT);
+ assert(arg2->IsCnsIntOrI());
- arg2->AsIntCon()->gtIconVal = nNewStkArgsWords;
+ arg2->AsIntCon()->SetIconValue(nNewStkArgsWords);
#ifdef DEBUG
// arg 3 == numberOfOldStackArgsWords
argEntry = call->gtArgs.GetArgByIndex(numArgs - 4);
assert(argEntry != nullptr);
GenTree* arg3 = argEntry->GetEarlyNode()->AsPutArgStk()->gtGetOp1();
- assert(arg3->gtOper == GT_CNS_INT);
+ assert(arg3->IsCnsIntOrI());
#endif // DEBUG
// Transform this call node into a call to Jit tail call helper.
@@ -3311,7 +3311,7 @@ GenTree* Lowering::DecomposeLongCompare(GenTree* cmp)
// then hiSrc1 would be 0.
//
- if (loSrc1->OperIs(GT_CNS_INT))
+ if (loSrc1->IsCnsIntOrI())
{
std::swap(loSrc1, loSrc2);
}
@@ -3328,7 +3328,7 @@ GenTree* Lowering::DecomposeLongCompare(GenTree* cmp)
ContainCheckBinary(loCmp->AsOp());
}
- if (hiSrc1->OperIs(GT_CNS_INT))
+ if (hiSrc1->IsCnsIntOrI())
{
std::swap(hiSrc1, hiSrc2);
}
@@ -3380,7 +3380,7 @@ GenTree* Lowering::DecomposeLongCompare(GenTree* cmp)
{
bool mustSwap = true;
- if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT))
+ if (loSrc2->IsCnsIntOrI() && hiSrc2->IsCnsIntOrI())
{
uint32_t loValue = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue());
uint32_t hiValue = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue());
@@ -3832,8 +3832,7 @@ GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
newOper = GT_JTEST;
cc = cond->OperIs(GT_LT) ? GenCondition(GenCondition::NE) : GenCondition(GenCondition::EQ);
// x < 0 => (x & signBit) != 0. Update the constant to be the sign bit.
- relopOp2->AsIntConCommon()->SetIntegralValue(
- (static_cast<int64_t>(1) << (8 * genTypeSize(genActualType(relopOp1)) - 1)));
+ relopOp2->AsIntCon()->SetIntegralValueUnsigned(1ULL << (8 * genTypeSize(genActualType(relopOp1)) - 1));
}
else if (cond->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue()))
{
@@ -4386,7 +4385,7 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
{
convertToStoreObj = true;
}
- else if (src->OperIs(GT_CNS_INT))
+ else if (src->IsCnsIntOrI())
{
assert(src->IsIntegralConst(0) && "expected an INIT_VAL for non-zero init.");
@@ -5150,7 +5149,7 @@ GenTree* Lowering::SetGCState(int state)
GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
- GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state);
+ GenTree* stateNode = comp->gtNewIconNode(state);
GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState);
GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode);
return storeGcState;
@@ -6833,7 +6832,7 @@ bool Lowering::TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode)
#endif // !TARGET_64BIT
}
- divisor->AsIntConCommon()->SetIconValue(magic);
+ divisor->AsIntCon()->SetIconValue(magic);
// Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node.
// The existing node will later be transformed into a GT_ADD/GT_SUB that
@@ -8295,7 +8294,7 @@ void Lowering::LowerStoreIndirCoalescing(GenTreeStoreInd* ind)
size_t val = (lowerCns | (upperCns << (genTypeSize(oldType) * BITS_IN_BYTE)));
JITDUMP("Coalesced two stores into a single store with value %lld\n", (int64_t)val);
- ind->Data()->AsIntCon()->gtIconVal = (ssize_t)val;
+ ind->Data()->AsIntCon()->SetIconValue(val);
if (genTypeSize(oldType) == 1)
{
// A mark for future foldings that this IND doesn't need to be atomic.
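
The coalescing hunk only swaps the raw field write for SetIconValue(); the merged value is still the lower constant with the upper constant shifted past the width of the old store type. Worked standalone for two adjacent 2-byte stores:

    #include <cstdint>
    #include <cstdio>
    int main()
    {
        const uint64_t lowerCns = 0x1111, upperCns = 0x2222;
        const unsigned oldTypeSize = 2; // bytes per original store
        uint64_t val = lowerCns | (upperCns << (oldTypeSize * 8));
        printf("0x%llx\n", (unsigned long long)val); // 0x22221111
        return 0;
    }
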
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 0ef5399dff06d..6a77df3f84668 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -65,13 +65,13 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
#endif
// Make sure we have an actual immediate
- if (!childNode->IsCnsIntOrI())
+ if (!childNode->IsIntegralConst())
return false;
if (childNode->AsIntCon()->ImmedValNeedsReloc(comp))
return false;
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t type.
- target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->gtIconVal;
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had target_ssize_t type.
+ target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->IconValue();
emitAttr attr = emitActualTypeSize(childNode->TypeGet());
emitAttr size = EA_SIZE(attr);
#ifdef TARGET_ARM
@@ -250,7 +250,7 @@ bool Lowering::IsContainableUnaryOrBinaryOp(GenTree* parentNode, GenTree* childN
GenTree* shiftAmountNode = childNode->gtGetOp2();
- if (!shiftAmountNode->IsCnsIntOrI())
+ if (!shiftAmountNode->IsIntegralConst())
{
// Cannot contain if the child's op2 is not a constant
return false;
@@ -397,7 +397,7 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
// On ARM, small stores can cost a bit more in terms of code size so we try to widen them. This is legal
// as most small locals have 4-byte-wide stack homes, the common exception being (dependent) struct fields.
//
- if (storeLoc->OperIs(GT_STORE_LCL_VAR) && varTypeIsSmall(storeLoc) && storeLoc->Data()->IsCnsIntOrI())
+ if (storeLoc->OperIs(GT_STORE_LCL_VAR) && varTypeIsSmall(storeLoc) && storeLoc->Data()->IsIntegralConst())
{
LclVarDsc* varDsc = comp->lvaGetDesc(storeLoc);
if (!varDsc->lvIsStructField && (varDsc->GetStackSlotHomeType() == TYP_INT))
@@ -472,7 +472,7 @@ GenTree* Lowering::LowerMul(GenTreeOp* mul)
else
{
assert(op2->IsIntegralConst());
- assert(FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue()));
+ assert(FitsIn<int32_t>(op2->AsIntCon()->IntegralValue()));
op2->ChangeType(TYP_INT);
}
@@ -571,7 +571,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)) &&
- src->OperIs(GT_CNS_INT))
+ src->IsIntegralConst())
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
@@ -688,7 +688,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT
return;
}
- if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->OperIs(GT_CNS_INT))
+ if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->IsIntegralConst())
{
return;
}
@@ -811,11 +811,11 @@ void Lowering::LowerRotate(GenTree* tree)
unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
GenTree* rotateLeftIndexNode = tree->AsOp()->gtOp2;
- if (rotateLeftIndexNode->IsCnsIntOrI())
+ if (rotateLeftIndexNode->IsIntegralConst())
{
- ssize_t rotateLeftIndex = rotateLeftIndexNode->AsIntCon()->gtIconVal;
- ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
- rotateLeftIndexNode->AsIntCon()->gtIconVal = rotateRightIndex;
+ ssize_t rotateLeftIndex = rotateLeftIndexNode->AsIntCon()->IconValue();
+ ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
+ rotateLeftIndexNode->AsIntCon()->SetIconValue(rotateRightIndex);
}
else
{
@@ -853,7 +853,7 @@ void Lowering::LowerModPow2(GenTree* node)
const var_types type = mod->TypeGet();
assert((type == TYP_INT) || (type == TYP_LONG));
- ssize_t divisorCnsValue = static_cast<ssize_t>(divisor->AsIntConCommon()->IntegralValue());
+ ssize_t divisorCnsValue = static_cast<ssize_t>(divisor->AsIntCon()->IntegralValue());
ssize_t divisorCnsValueMinusOne = divisorCnsValue - 1;
BlockRange().Remove(divisor);
@@ -1223,9 +1223,9 @@ bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node)
op1 = castOp;
}
- if (op1->IsCnsIntOrI())
+ if (op1->IsIntegralConst())
{
- const ssize_t dataValue = op1->AsIntCon()->gtIconVal;
+ const ssize_t dataValue = op1->AsIntCon()->IconValue();
if (comp->GetEmitter()->emitIns_valid_imm_for_movi(dataValue, emitActualTypeSize(node->GetSimdBaseType())))
{
@@ -2149,7 +2149,7 @@ void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
}
#endif // TARGET_ARM
- if (shiftBy->IsCnsIntOrI())
+ if (shiftBy->IsIntegralConst())
{
MakeSrcContained(node, shiftBy);
}
@@ -2479,7 +2479,7 @@ void Lowering::ContainCheckConditionalCompare(GenTreeCCMP* cmp)
if (op2->IsCnsIntOrI() && !op2->AsIntCon()->ImmedValNeedsReloc(comp))
{
- target_ssize_t immVal = (target_ssize_t)op2->AsIntCon()->gtIconVal;
+ target_ssize_t immVal = (target_ssize_t)op2->AsIntCon()->IconValue();
if (emitter::emitIns_valid_imm_for_ccmp(immVal))
{
@@ -3051,7 +3051,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
{
MakeSrcContained(node, intrin.op2);
- if ((intrin.op2->AsIntCon()->gtIconVal == 0) && intrin.op3->IsCnsFltOrDbl())
+ if ((intrin.op2->AsIntCon()->IconValue() == 0) && intrin.op3->IsCnsFltOrDbl())
{
assert(varTypeIsFloating(intrin.baseType));
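
LowerRotate's constant path rewrites a rotate-left by c into a rotate-right by (bitsize - c), since the target encodes only one direction; the hunk above merely routes the index through IconValue()/SetIconValue(). The identity the rewrite relies on, checked standalone:

    #include <cstdint>
    #include <cstdio>
    int main()
    {
        const uint32_t value            = 0x000000F0u;
        const unsigned rotateLeftIndex  = 8;
        const unsigned rotateRightIndex = 32 - rotateLeftIndex; // what gets stored back
        uint32_t rol = (value << rotateLeftIndex) | (value >> ((32 - rotateLeftIndex) & 31));
        uint32_t ror = (value >> rotateRightIndex) | (value << ((32 - rotateRightIndex) & 31));
        printf("%s\n", (rol == ror) ? "equal" : "broken"); // equal
        return 0;
    }
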
diff --git a/src/coreclr/jit/lowerloongarch64.cpp b/src/coreclr/jit/lowerloongarch64.cpp
index d89c8723e80f6..b9349d821fac7 100644
--- a/src/coreclr/jit/lowerloongarch64.cpp
+++ b/src/coreclr/jit/lowerloongarch64.cpp
@@ -59,8 +59,8 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
if (childNode->AsIntCon()->ImmedValNeedsReloc(comp))
return false;
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t type.
- target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->gtIconVal;
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had target_ssize_t type.
+ target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->IconValue();
switch (parentNode->OperGet())
{
@@ -297,7 +297,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)) &&
- src->OperIs(GT_CNS_INT))
+ src->IsCnsIntOrI())
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
@@ -408,7 +408,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT
return;
}
- if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->OperIs(GT_CNS_INT))
+ if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->IsCnsIntOrI())
{
return;
}
@@ -554,9 +554,9 @@ void Lowering::LowerRotate(GenTree* tree)
if (rotateLeftIndexNode->IsCnsIntOrI())
{
- ssize_t rotateLeftIndex = rotateLeftIndexNode->AsIntCon()->gtIconVal;
- ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
- rotateLeftIndexNode->AsIntCon()->gtIconVal = rotateRightIndex;
+ ssize_t rotateLeftIndex = rotateLeftIndexNode->AsIntCon()->IconValue();
+ ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
+ rotateLeftIndexNode->AsIntCon()->SetIconValue(rotateRightIndex);
}
else
{
diff --git a/src/coreclr/jit/lowerriscv64.cpp b/src/coreclr/jit/lowerriscv64.cpp
index 65426e493a2c1..676975ca4a023 100644
--- a/src/coreclr/jit/lowerriscv64.cpp
+++ b/src/coreclr/jit/lowerriscv64.cpp
@@ -59,8 +59,8 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
if (childNode->AsIntCon()->ImmedValNeedsReloc(comp))
return false;
- // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t type.
- target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->gtIconVal;
+ // TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::IconValue() had target_ssize_t type.
+ target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->IconValue();
switch (parentNode->OperGet())
{
@@ -245,7 +245,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
src = src->AsUnOp()->gtGetOp1();
}
- if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= INITBLK_UNROLL_LIMIT) && src->OperIs(GT_CNS_INT))
+ if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= INITBLK_UNROLL_LIMIT) && src->IsCnsIntOrI())
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
@@ -354,7 +354,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT
return;
}
- if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->OperIs(GT_CNS_INT))
+ if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->IsCnsIntOrI())
{
return;
}
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index 3daa96268d721..78fbb01157a4e 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -144,8 +144,8 @@ GenTree* Lowering::TryLowerMulWithConstant(GenTreeOp* node)
if (!op2->IsCnsIntOrI())
return nullptr;
- GenTreeIntConCommon* cns = op2->AsIntConCommon();
- ssize_t cnsVal = cns->IconValue();
+ GenTreeIntCon* cns = op2->AsIntCon();
+ ssize_t cnsVal = cns->IconValue();
// Use GT_LEA if cnsVal is 3, 5, or 9.
// These are handled in codegen.
@@ -318,7 +318,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= comp->getUnrollThreshold(Compiler::UnrollKind::Memset)))
{
- if (!src->OperIs(GT_CNS_INT))
+ if (!src->IsCnsIntOrI())
{
// TODO-CQ: We could unroll even when the initialization value is not a constant
// by inserting a MUL init, 0x01010101 instruction. We need to determine if the
@@ -1364,7 +1364,7 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
break;
}
- ssize_t ival = op3->AsIntConCommon()->IconValue();
+ ssize_t ival = op3->AsIntCon()->IconValue();
ssize_t zmask = (ival & 0x0F);
ssize_t count_d = (ival & 0x30) >> 4;
@@ -1379,7 +1379,7 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
zmask &= 0x0F;
ival = (count_s << 6) | (count_d << 4) | (zmask);
- op3->AsIntConCommon()->SetIconValue(ival);
+ op3->AsIntCon()->SetIconValue(ival);
}
else if (op2IsVectorZero)
{
@@ -1390,7 +1390,7 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
zmask &= 0x0F;
ival = (count_s << 6) | (count_d << 4) | (zmask);
- op3->AsIntConCommon()->SetIconValue(ival);
+ op3->AsIntCon()->SetIconValue(ival);
}
if (zmask == 0x0F)
@@ -1465,9 +1465,9 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
assert(op1Intrinsic->Op(2)->isContained());
- ssize_t op1Ival = op1Idx->AsIntConCommon()->IconValue();
+ ssize_t op1Ival = op1Idx->AsIntCon()->IconValue();
ival |= ((op1Ival & 0x0F) & ~(1 << count_d));
- op3->AsIntConCommon()->SetIconValue(ival);
+ op3->AsIntCon()->SetIconValue(ival);
// Then we'll just carry the original non-zero input and
// remove the now unused constant nodes
@@ -1486,9 +1486,9 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
// Since this is a later op, direct merging is safe
- ssize_t op1Ival = op1Idx->AsIntConCommon()->IconValue();
+ ssize_t op1Ival = op1Idx->AsIntCon()->IconValue();
ival = op1Ival | zmask;
- op3->AsIntConCommon()->SetIconValue(ival);
+ op3->AsIntCon()->SetIconValue(ival);
// Then we'll just carry the inputs from op1 and remove the now
// unused constant nodes
@@ -6286,7 +6286,7 @@ bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
- // At this point we know that it is an int const fits within 4-bytes and hence can safely cast to IntConCommon.
+ // At this point we know that it is an int const that fits within 4 bytes and hence can safely be cast to IntCon.
// Icons that need relocation should never be marked as contained immed
- if (childNode->AsIntConCommon()->ImmedValNeedsReloc(comp))
+ if (childNode->AsIntCon()->ImmedValNeedsReloc(comp))
{
return false;
}
@@ -6493,7 +6493,7 @@ void Lowering::ContainCheckIndir(GenTreeIndir* node)
}
else if (addr->IsCnsIntOrI())
{
- GenTreeIntConCommon* icon = addr->AsIntConCommon();
+ GenTreeIntCon* icon = addr->AsIntCon();
#if defined(FEATURE_SIMD)
if (((addr->TypeGet() != TYP_SIMD12) || !icon->ImmedValNeedsReloc(comp)) && icon->FitsInAddrBase(comp))
@@ -6804,9 +6804,9 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
bool useLeaEncoding = false;
GenTree* memOp = nullptr;
- bool hasImpliedFirstOperand = false;
- GenTreeIntConCommon* imm = nullptr;
- GenTree* other = nullptr;
+ bool hasImpliedFirstOperand = false;
+ GenTreeIntCon* imm = nullptr;
+ GenTree* other = nullptr;
// Multiply should never be using small types
assert(!varTypeIsSmall(node->TypeGet()));
@@ -6832,17 +6832,17 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
{
if (IsContainableImmed(node, op2))
{
- imm = op2->AsIntConCommon();
+ imm = op2->AsIntCon();
other = op1;
}
else
{
- imm = op1->AsIntConCommon();
+ imm = op1->AsIntCon();
other = op2;
}
// CQ: We want to rewrite this into a LEA
- ssize_t immVal = imm->AsIntConCommon()->IconValue();
+ ssize_t immVal = imm->AsIntCon()->IconValue();
if (!requiresOverflowCheck && (immVal == 3 || immVal == 5 || immVal == 9))
{
useLeaEncoding = true;
@@ -6997,8 +6997,8 @@ void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
#endif
GenTree* shiftBy = node->gtOp2;
- if (IsContainableImmed(node, shiftBy) && (shiftBy->AsIntConCommon()->IconValue() <= 255) &&
- (shiftBy->AsIntConCommon()->IconValue() >= 0))
+ if (IsContainableImmed(node, shiftBy) && (shiftBy->AsIntCon()->IconValue() <= 255) &&
+ (shiftBy->AsIntCon()->IconValue() >= 0))
{
MakeSrcContained(node, shiftBy);
}
@@ -7433,7 +7433,7 @@ bool Lowering::LowerRMWMemOp(GenTreeIndir* storeInd)
{
indirDst->SetContained();
}
- else if (indirCandidateChild->IsCnsIntOrI() && indirCandidateChild->AsIntConCommon()->FitsInAddrBase(comp))
+ else if (indirCandidateChild->IsCnsIntOrI() && indirCandidateChild->AsIntCon()->FitsInAddrBase(comp))
{
indirDst->SetContained();
}
@@ -8525,7 +8525,7 @@ void Lowering::ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* ad
assert((addr->TypeGet() == TYP_I_IMPL) || (addr->TypeGet() == TYP_BYREF));
TryCreateAddrMode(addr, true, node);
if ((addr->OperIs(GT_CLS_VAR_ADDR, GT_LCL_ADDR, GT_LEA) ||
- (addr->IsCnsIntOrI() && addr->AsIntConCommon()->FitsInAddrBase(comp))) &&
+ (addr->IsCnsIntOrI() && addr->AsIntCon()->FitsInAddrBase(comp))) &&
IsInvariantInRange(addr, node))
{
MakeSrcContained(node, addr);
@@ -9574,7 +9574,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
// ival is already in the correct state to account for it
#if DEBUG
- ssize_t ival = lastOp->AsIntConCommon()->IconValue();
+ ssize_t ival = lastOp->AsIntCon()->IconValue();
ssize_t zmask = (ival & 0x0F);
ssize_t count_d = (ival & 0x30) >> 4;
@@ -9595,7 +9595,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
// zmask is already in the correct state to account for it
#if DEBUG
- ssize_t ival = lastOp->AsIntConCommon()->IconValue();
+ ssize_t ival = lastOp->AsIntCon()->IconValue();
ssize_t zmask = (ival & 0x0F);
ssize_t count_d = (ival & 0x30) >> 4;
@@ -9707,7 +9707,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
MakeSrcRegOptional(node, op3);
}
- uint8_t control = static_cast<uint8_t>(op4->AsIntCon()->gtIconVal);
+ uint8_t control = static_cast<uint8_t>(op4->AsIntCon()->IconValue());
const TernaryLogicInfo& info = TernaryLogicInfo::lookup(control);
TernaryLogicUseFlags useFlags = info.GetAllUseFlags();
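
Several of the xarch hunks read and rebuild the SSE4.1 insertps immediate, whose layout is [count_s:2][count_d:2][zmask:4]; the edits change only how the byte is accessed, not the bit twiddling. Decoding and re-encoding round-trips:

    #include <cstdio>
    int main()
    {
        const int ival = 0x75;            // 0b01'11'0101
        int zmask   = ival & 0x0F;        // 5: lanes zeroed
        int count_d = (ival & 0x30) >> 4; // 3: destination lane
        int count_s = (ival & 0xC0) >> 6; // 1: source lane
        int rebuilt = (count_s << 6) | (count_d << 4) | zmask;
        printf("%d %d %d 0x%02X\n", zmask, count_d, count_s, rebuilt); // 5 3 1 0x75
        return 0;
    }
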
diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h
index d0f7aeb836969..92af3a03aa59a 100644
--- a/src/coreclr/jit/lsra.h
+++ b/src/coreclr/jit/lsra.h
@@ -540,11 +540,11 @@ inline bool leafInRange(GenTree* leaf, int lower, int upper)
{
return false;
}
- if (leaf->AsIntCon()->gtIconVal < lower)
+ if (leaf->AsIntCon()->IconValue() < lower)
{
return false;
}
- if (leaf->AsIntCon()->gtIconVal > upper)
+ if (leaf->AsIntCon()->IconValue() > upper)
{
return false;
}
@@ -558,7 +558,7 @@ inline bool leafInRange(GenTree* leaf, int lower, int upper, int multiple)
{
return false;
}
- if (leaf->AsIntCon()->gtIconVal % multiple)
+ if (leaf->AsIntCon()->IconValue() % multiple)
{
return false;
}
diff --git a/src/coreclr/jit/lsraarm.cpp b/src/coreclr/jit/lsraarm.cpp
index c16cc9162b3d4..0545fff51e27b 100644
--- a/src/coreclr/jit/lsraarm.cpp
+++ b/src/coreclr/jit/lsraarm.cpp
@@ -53,7 +53,7 @@ int LinearScan::BuildLclHeap(GenTree* tree)
assert(size->isContained());
srcCount = 0;
- size_t sizeVal = size->AsIntCon()->gtIconVal;
+ size_t sizeVal = size->AsIntCon()->IconValue();
if (sizeVal == 0)
{
internalIntCount = 0;
diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp
index 81bdd5334a54e..21ea4793bdf64 100644
--- a/src/coreclr/jit/lsraarm64.cpp
+++ b/src/coreclr/jit/lsraarm64.cpp
@@ -1115,7 +1115,7 @@ int LinearScan::BuildNode(GenTree* tree)
assert(size->isContained());
srcCount = 0;
- size_t sizeVal = size->AsIntCon()->gtIconVal;
+ size_t sizeVal = size->AsIntCon()->IconValue();
if (sizeVal != 0)
{
diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp
index 7d9803c645799..8c73f24e9341a 100644
--- a/src/coreclr/jit/lsrabuild.cpp
+++ b/src/coreclr/jit/lsrabuild.cpp
@@ -4348,14 +4348,14 @@ int LinearScan::BuildCmpOperands(GenTree* tree)
// Example2: GT_EQ(int, op1 of type ubyte, op2 is GT_CNS_INT) - in this case codegen uses
// ubyte as the result of the comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size.
- else if (varTypeIsByte(op1) && op2->IsCnsIntOrI())
+ else if (varTypeIsByte(op1) && op2->IsIntegralConst())
{
needByteRegs = true;
}
// Example3: GT_EQ(int, op1 is GT_CNS_INT, op2 of type ubyte) - in this case codegen uses
// ubyte as the result of the comparison and if the result needs to be materialized into a reg
// simply zero extend it to TYP_INT size.
- else if (op1->IsCnsIntOrI() && varTypeIsByte(op2))
+ else if (op1->IsIntegralConst() && varTypeIsByte(op2))
{
needByteRegs = true;
}
diff --git a/src/coreclr/jit/lsraloongarch64.cpp b/src/coreclr/jit/lsraloongarch64.cpp
index b695e6652f497..d52d819e48c5c 100644
--- a/src/coreclr/jit/lsraloongarch64.cpp
+++ b/src/coreclr/jit/lsraloongarch64.cpp
@@ -436,7 +436,7 @@ int LinearScan::BuildNode(GenTree* tree)
assert(size->isContained());
srcCount = 0;
- size_t sizeVal = size->AsIntCon()->gtIconVal;
+ size_t sizeVal = size->AsIntCon()->IconValue();
if (sizeVal != 0)
{
diff --git a/src/coreclr/jit/lsrariscv64.cpp b/src/coreclr/jit/lsrariscv64.cpp
index a9c357aca8ac0..55ad9a1ec31d5 100644
--- a/src/coreclr/jit/lsrariscv64.cpp
+++ b/src/coreclr/jit/lsrariscv64.cpp
@@ -470,7 +470,7 @@ int LinearScan::BuildNode(GenTree* tree)
assert(size->isContained());
srcCount = 0;
- size_t sizeVal = size->AsIntCon()->gtIconVal;
+ size_t sizeVal = size->AsIntCon()->IconValue();
if (sizeVal != 0)
{
diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp
index 3257b3c04b216..72aca3cbb948a 100644
--- a/src/coreclr/jit/lsraxarch.cpp
+++ b/src/coreclr/jit/lsraxarch.cpp
@@ -149,7 +149,6 @@ int LinearScan::BuildNode(GenTree* tree)
break;
case GT_CNS_INT:
- case GT_CNS_LNG:
case GT_CNS_DBL:
case GT_CNS_VEC:
{
@@ -1811,7 +1810,7 @@ int LinearScan::BuildLclHeap(GenTree* tree)
if (size->IsCnsIntOrI() && size->isContained())
{
srcCount = 0;
- size_t sizeVal = AlignUp((size_t)size->AsIntCon()->gtIconVal, STACK_ALIGN);
+ size_t sizeVal = AlignUp((size_t)size->AsIntCon()->IconValue(), STACK_ALIGN);
// Explicitly zeroed LCLHEAP also needs a regCnt in case of x86 or large page
if ((TARGET_POINTER_SIZE == 4) || (sizeVal >= compiler->eeGetPageSize()))
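
BuildLclHeap rounds the constant allocation size up to STACK_ALIGN before deciding how the allocation is expanded. AlignUp is the usual power-of-two rounding; for example, assuming STACK_ALIGN is 16:

    #include <cstddef>
    #include <cstdio>
    int main()
    {
        const size_t sizeVal = 40, align = 16; // align must be a power of two
        size_t aligned = (sizeVal + align - 1) & ~(align - 1);
        printf("%zu\n", aligned); // 48
        return 0;
    }
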
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index ab10da3bed363..96d484f0379a1 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -600,7 +600,7 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree)
// than 2^{31} for a cast to int.
int maxWidth = (dstType == TYP_UINT) ? 32 : 31;
- if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0))
+ if (andOp2->IsIntegralConst() && ((andOp2->AsIntCon()->LngValue() >> maxWidth) == 0))
{
tree->ClearOverflow();
tree->SetAllEffectsFlags(oper);
@@ -1341,7 +1341,7 @@ void CallArgs::SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs)
assert(argx != nullptr);
// put constants at the end of the table
//
- if (argx->gtOper == GT_CNS_INT)
+ if (argx->IsCnsIntOrI())
{
noway_assert(curInx <= endTab);
@@ -1501,8 +1501,7 @@ void CallArgs::SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs)
assert(argx != nullptr);
// We should have already handled these kinds of args
- assert((!argx->OperIs(GT_LCL_VAR, GT_LCL_FLD) || argx->TypeIs(TYP_STRUCT)) &&
- !argx->OperIs(GT_CNS_INT));
+ assert((!argx->OperIs(GT_LCL_VAR, GT_LCL_FLD) || argx->TypeIs(TYP_STRUCT)) && !argx->IsCnsIntOrI());
// This arg should either have no persistent side effects or be the last one in our table
// assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1)));
@@ -4408,7 +4407,7 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr)
// Widen 'index' on 64-bit targets
if (index->TypeGet() != TYP_I_IMPL)
{
- if (index->OperGet() == GT_CNS_INT)
+ if (index->IsCnsIntOrI())
{
index->gtType = TYP_I_IMPL;
}
@@ -7777,7 +7776,7 @@ GenTree* Compiler::fgMorphCall(GenTreeCall* call)
GenTree* value = call->gtArgs.GetArgByIndex(2)->GetNode();
if (value->IsIntegralConst(0))
{
- assert(value->OperGet() == GT_CNS_INT);
+ assert(value->IsCnsIntOrI());
GenTree* arr = call->gtArgs.GetArgByIndex(0)->GetNode();
GenTree* index = call->gtArgs.GetArgByIndex(1)->GetNode();
@@ -8103,7 +8102,7 @@ GenTree* Compiler::fgMorphLeaf(GenTree* tree)
// Refer to gtNewIconHandleNode() as the template for constructing a constant handle
//
tree->SetOper(GT_CNS_INT);
- tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle));
+ tree->AsIntCon()->SetIconValue(ssize_t(addrInfo.handle));
tree->gtFlags |= GTF_ICON_FTN_ADDR;
INDEBUG(tree->AsIntCon()->gtTargetHandle = reinterpret_cast(funcHandle));
break;
@@ -8396,8 +8395,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
else
{
GenTree* effOp1 = op1->gtEffectiveVal();
- noway_assert((effOp1->gtOper == GT_CNS_INT) &&
- (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
+ noway_assert(effOp1->IsCnsIntOrI() && (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
}
break;
@@ -8606,8 +8604,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
noway_assert(op2);
if ((typ == TYP_LONG) && opts.OptimizationEnabled())
{
- if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
- op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
+ if (op2->IsIntegralConst() && op2->AsIntCon()->LngValue() >= 2 &&
+ op2->AsIntCon()->LngValue() <= 0x3fffffff)
{
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
noway_assert(op1->TypeIs(TYP_LONG));
@@ -8619,7 +8617,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// If op1 is a constant, then do constant folding of the division operator.
- if (op1->OperIs(GT_CNS_NATIVELONG))
+ if (op1->IsIntegralConst())
{
tree = gtFoldExpr(tree);
}
@@ -8814,8 +8812,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
JITDUMP("\nTransforming:\n");
DISPTREE(tree);
- op1->SetOper(GT_AND); // Change % => &
- op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1
+ op1->SetOper(GT_AND); // Change % => &
+ op1op2->AsIntCon()->SetIconValue(modValue - 1); // Change c => c - 1
fgUpdateConstTreeValueNumber(op1op2);
JITDUMP("\ninto:\n");
@@ -9258,7 +9256,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
if (tree->OperIs(GT_GT, GT_LT, GT_LE, GT_GE))
{
tree = fgOptimizeRelationalComparisonWithFullRangeConst(tree->AsOp());
- if (tree->OperIs(GT_CNS_INT))
+ if (tree->IsCnsIntOrI())
{
return tree;
}
@@ -9302,7 +9300,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
// Negate the constant and change the node to be "+",
// except when `op2` is a const byref.
- op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
+ op2->AsIntCon()->SetIconValue(-op2->AsIntCon()->IconValue());
op2->AsIntConRef().gtFieldSeq = nullptr;
oper = GT_ADD;
tree->ChangeOper(oper);
@@ -9877,7 +9875,7 @@ GenTree* Compiler::fgMorphFinalizeIndir(GenTreeIndir* indir)
gtPeelOffsets(&effAddr, &offset);
if (((offset % genTypeSize(TYP_FLOAT)) != 0) ||
- (effAddr->IsCnsIntOrI() && ((effAddr->AsIntConCommon()->IconValue() % genTypeSize(TYP_FLOAT)) != 0)))
+ (effAddr->IsCnsIntOrI() && ((effAddr->AsIntCon()->IconValue() % genTypeSize(TYP_FLOAT)) != 0)))
{
indir->gtFlags |= GTF_IND_UNALIGNED;
}
@@ -10148,8 +10146,8 @@ GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!optValnumCSE_phase);
- GenTree* op1 = cmp->gtGetOp1();
- GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
+ GenTree* op1 = cmp->gtGetOp1();
+ GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
// Check for "(expr +/- icon1) ==/!= (non-zero-icon2)".
if (op2->IsCnsIntOrI() && (op2->IconValue() != 0))
@@ -10252,7 +10250,7 @@ GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
goto SKIP;
}
- GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
+ GenTreeIntCon* andMask = andOp->gtGetOp2()->AsIntCon();
if (andOp->TypeIs(TYP_INT) && shiftAmount < 32)
{
@@ -10339,12 +10337,12 @@ GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
// Is the result of the mask effectively an INT?
GenTreeOp* andOp = op1->AsOp();
- if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG))
+ if (!andOp->gtGetOp2()->IsIntegralConst())
{
return cmp;
}
- GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
+ GenTreeIntCon* andMask = andOp->gtGetOp2()->AsIntCon();
if ((andMask->LngValue() >> 32) != 0)
{
return cmp;
@@ -10402,7 +10400,7 @@ GenTree* Compiler::fgOptimizeRelationalComparisonWithFullRangeConst(GenTreeOp* c
int64_t lhsMax;
if (cmp->gtGetOp1()->IsIntegralConst())
{
- lhsMin = cmp->gtGetOp1()->AsIntConCommon()->IntegralValue();
+ lhsMin = cmp->gtGetOp1()->AsIntCon()->IntegralValue();
lhsMax = lhsMin;
}
else
@@ -10416,7 +10414,7 @@ GenTree* Compiler::fgOptimizeRelationalComparisonWithFullRangeConst(GenTreeOp* c
int64_t rhsMax;
if (cmp->gtGetOp2()->IsIntegralConst())
{
- rhsMin = cmp->gtGetOp2()->AsIntConCommon()->IntegralValue();
+ rhsMin = cmp->gtGetOp2()->AsIntCon()->IntegralValue();
rhsMax = rhsMin;
}
else
@@ -10516,8 +10514,8 @@ GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp)
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2()));
- GenTree* op1 = cmp->gtGetOp1();
- GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
+ GenTree* op1 = cmp->gtGetOp1();
+ GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
assert(genActualType(op1) == genActualType(op2));
@@ -11132,7 +11130,7 @@ GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
if (op2->IsIntegralConst(0) && (genActualType(add) == genActualType(op1)))
{
// Keep the offset nodes with annotations for value numbering purposes.
- if (!op2->IsCnsIntOrI() || (op2->AsIntCon()->gtFieldSeq == nullptr))
+ if (op2->AsIntCon()->gtFieldSeq == nullptr)
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(add);
@@ -11148,7 +11146,7 @@ GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
{
// Reduce local addresses: "ADD(LCL_ADDR, OFFSET)" => "LCL_FLD_ADDR".
//
- if (op1->OperIs(GT_LCL_ADDR) && op2->IsCnsIntOrI())
+ if (op1->OperIs(GT_LCL_ADDR) && op2->IsIntegralConst())
{
GenTreeLclVarCommon* lclAddrNode = op1->AsLclVarCommon();
GenTreeIntCon* offsetNode = op2->AsIntCon();
@@ -11273,15 +11271,15 @@ GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
// MUL(NEG(a), C) => MUL(a, NEG(C))
if (opts.OptimizationEnabled() && op1->OperIs(GT_NEG) && !op2->IsIconHandle())
{
- mul->gtOp1 = op1->AsUnOp()->gtGetOp1();
- op2->AsIntCon()->gtIconVal = -op2->AsIntCon()->gtIconVal;
+ mul->gtOp1 = op1->AsUnOp()->gtGetOp1();
+ op2->AsIntCon()->SetIconValue(-op2->AsIntCon()->IconValue());
fgUpdateConstTreeValueNumber(op2);
DEBUG_DESTROY_NODE(op1);
op1 = mul->gtOp1;
}
- ssize_t mult = op2->AsIntConCommon()->IconValue();
+ ssize_t mult = op2->AsIntCon()->IconValue();
if (mult == 0)
{
@@ -11330,7 +11328,7 @@ GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
}
// Change the multiplication into a shift by log2(val) bits.
- op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult));
+ op2->AsIntCon()->SetIconValue(genLog2(abs_mult));
changeToShift = true;
}
else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit))
@@ -11353,7 +11351,7 @@ GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
- op2->AsIntConCommon()->SetIconValue(shift);
+ op2->AsIntCon()->SetIconValue(shift);
changeToShift = true;
}
}
@@ -11485,7 +11483,7 @@ GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp)
auto isUpperZero = [this](GenTree* op) {
if (op->IsIntegralConst())
{
- int64_t lng = op->AsIntConCommon()->LngValue();
+ int64_t lng = op->AsIntCon()->LngValue();
return (lng >= 0) && (lng <= UINT_MAX);
}
@@ -11511,7 +11509,7 @@ GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp)
auto transform = [this](GenTree** use) {
if ((*use)->IsIntegralConst())
{
- (*use)->BashToConst(static_cast<int>((*use)->AsIntConCommon()->LngValue()));
+ (*use)->BashToConst(static_cast<int>((*use)->AsIntCon()->LngValue()));
fgUpdateConstTreeValueNumber(*use);
}
else
@@ -11873,7 +11871,7 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree, bool* optAssertionPropD
/* Check for the case "(val + icon) * icon" */
- if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD)
+ if (op2->IsCnsIntOrI() && op1->gtOper == GT_ADD)
{
GenTree* add = op1->AsOp()->gtOp2;
@@ -11884,8 +11882,8 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree, bool* optAssertionPropD
break;
}
- ssize_t imul = op2->AsIntCon()->gtIconVal;
- ssize_t iadd = add->AsIntCon()->gtIconVal;
+ ssize_t imul = op2->AsIntCon()->IconValue();
+ ssize_t iadd = add->AsIntCon()->IconValue();
/* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */
@@ -11928,8 +11926,8 @@ GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree, bool* optAssertionPropD
if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0))
{
- ssize_t ishf = op2->AsIntConCommon()->IconValue();
- ssize_t iadd = cns->AsIntConCommon()->IconValue();
+ ssize_t ishf = op2->AsIntCon()->IconValue();
+ ssize_t iadd = cns->AsIntCon()->IconValue();
// printf("Changing '(val+icon1)<gtType = op1->gtType;
- op2->AsIntConCommon()->SetValueTruncating(iadd << ishf);
+ op2->AsIntCon()->SetValueTruncating(iadd << ishf);
op1->ChangeOper(GT_LSH);
- cns->AsIntConCommon()->SetIconValue(ishf);
+ cns->AsIntCon()->SetIconValue(ishf);
}
}
@@ -12127,7 +12125,7 @@ GenTree* Compiler::fgMorphModToZero(GenTreeOp* tree)
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
- op2->AsIntConCommon()->SetIntegralValue(0);
+ op2->AsIntCon()->SetIntegralValue(0);
fgUpdateConstTreeValueNumber(op2);
GenTree* const zero = op2;
@@ -12310,7 +12308,7 @@ GenTree* Compiler::fgMorphUModToAndSub(GenTreeOp* tree)
const var_types type = tree->TypeGet();
- const size_t cnsValue = (static_cast<size_t>(tree->gtOp2->AsIntConCommon()->IntegralValue())) - 1;
+ const size_t cnsValue = (static_cast<size_t>(tree->gtOp2->AsIntCon()->IntegralValue())) - 1;
GenTree* const newTree = gtNewOperNode(GT_AND, type, tree->gtOp1, gtNewIconNode(cnsValue, type));
INDEBUG(newTree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
@@ -12444,7 +12442,7 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
- leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
+ leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->IconValue();
leftShiftIndex = leftShiftIndex->gtGetOp1();
}
else
@@ -12457,7 +12455,7 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
- rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
+ rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->IconValue();
rightShiftIndex = rightShiftIndex->gtGetOp1();
}
else
@@ -12497,7 +12495,7 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI())
{
- if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize)
+ if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->IconValue() == rotatedValueBitSize)
{
if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG)
{
@@ -12530,7 +12528,8 @@ GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
}
else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
{
- if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize)
+ if (leftShiftIndex->AsIntCon()->IconValue() + rightShiftIndex->AsIntCon()->IconValue() ==
+ rotatedValueBitSize)
{
// We found this pattern:
// (x << c1) | (x >>> c2)
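The source-level shape fgRecognizeAndMorphBitwiseRotation matches looks roughly like the following sketch (rotl32 is a hypothetical name):

#include <cassert>
#include <cstdint>

// Becomes a single GT_ROL once c1 + c2 is proven equal to the bit size (32 here).
uint32_t rotl32(uint32_t x, uint32_t c) // assumes 0 < c < 32
{
    return (x << c) | (x >> (32 - c));
}

int main()
{
    assert(rotl32(0x80000001u, 1) == 0x00000003u);
    return 0;
}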
@@ -13132,7 +13131,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
/* Yupee - we folded the conditional!
* Remove the conditional statement */
- noway_assert(cond->gtOper == GT_CNS_INT);
+ noway_assert(cond->IsCnsIntOrI());
noway_assert((block->Next()->countOfInEdges() > 0) && (block->GetJumpDest()->countOfInEdges() > 0));
if (condTree != cond)
@@ -13155,7 +13154,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
BasicBlock* bTaken;
BasicBlock* bNotTaken;
- if (cond->AsIntCon()->gtIconVal != 0)
+ if (cond->AsIntCon()->IconValue() != 0)
{
/* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
block->SetJumpKind(BBJ_ALWAYS);
@@ -13365,7 +13364,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
// Yupee - we folded the conditional!
// Remove the conditional statement
- noway_assert(cond->gtOper == GT_CNS_INT);
+ noway_assert(cond->IsCnsIntOrI());
if (condTree != cond)
{
@@ -13384,7 +13383,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
// modify the flow graph
// Find the actual jump target
- size_t switchVal = (size_t)cond->AsIntCon()->gtIconVal;
+ size_t switchVal = (size_t)cond->AsIntCon()->IconValue();
unsigned jumpCnt = block->GetJumpSwt()->bbsCount;
BasicBlock** jumpTab = block->GetJumpSwt()->bbsDstTab;
bool foundVal = false;
diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp
index 75e19184cfcf9..c9f09d3123d8a 100644
--- a/src/coreclr/jit/morphblock.cpp
+++ b/src/coreclr/jit/morphblock.cpp
@@ -379,7 +379,7 @@ void MorphInitBlockHelper::TryInitFieldByField()
GenTree* initVal = m_src->OperIsInitVal() ? m_src->gtGetOp1() : m_src;
- if (!initVal->OperIs(GT_CNS_INT))
+ if (!initVal->IsCnsIntOrI())
{
JITDUMP(" source is not constant.\n");
return;
@@ -1541,7 +1541,7 @@ GenTree* Compiler::fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree)
if (tree->gtDynamicSize->IsIntegralConst())
{
- int64_t size = tree->gtDynamicSize->AsIntConCommon()->IntegralValue();
+ int64_t size = tree->gtDynamicSize->AsIntCon()->IntegralValue();
if ((size != 0) && FitsIn<int32_t>(size))
{
diff --git a/src/coreclr/jit/optcse.cpp b/src/coreclr/jit/optcse.cpp
index 9ad785a1257dc..13e58634e51c9 100644
--- a/src/coreclr/jit/optcse.cpp
+++ b/src/coreclr/jit/optcse.cpp
@@ -480,7 +480,7 @@ unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt)
// We don't share small offset constants when they require a reloc
// Also, we don't share non-null const gc handles
//
- if (!tree->AsIntConCommon()->ImmedValNeedsReloc(this) && ((tree->IsIntegralConst(0)) || !varTypeIsGC(tree)))
+ if (!tree->AsIntCon()->ImmedValNeedsReloc(this) && ((tree->IsIntegralConst(0)) || !varTypeIsGC(tree)))
{
// Here we make constants that have the same upper bits use the same key
//
@@ -523,7 +523,7 @@ unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt)
if (hashDsc->csdHashKey == key)
{
// Check for mismatched types on GT_CNS_INT nodes
- if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() != hashDsc->csdTree->TypeGet()))
+ if ((tree->IsCnsIntOrI()) && (tree->TypeGet() != hashDsc->csdTree->TypeGet()))
{
continue;
}
@@ -3536,11 +3536,15 @@ bool Compiler::optIsCSEcandidate(GenTree* tree)
return (tree->AsOp()->gtOp1->gtOper != GT_ARR_ELEM);
- case GT_CNS_LNG:
+ case GT_CNS_INT:
#ifndef TARGET_64BIT
- return false; // Don't CSE 64-bit constants on 32-bit platforms
+ if (tree->TypeIs(TYP_LONG))
+ {
+ return false; // Don't CSE 64-bit constants on 32-bit platforms
+ }
#endif
- case GT_CNS_INT:
+ FALLTHROUGH;
+
case GT_CNS_DBL:
case GT_CNS_STR:
case GT_CNS_VEC:
diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp
index 3c55cef198ab5..2de4ed9c3062a 100644
--- a/src/coreclr/jit/optimizebools.cpp
+++ b/src/coreclr/jit/optimizebools.cpp
@@ -1021,7 +1021,7 @@ Statement* OptBoolsDsc::optOptimizeBoolsChkBlkCond()
}
// The third block is Return with "CNS_INT int 0/1"
- if (testTree3->AsOp()->gtOp1->gtOper != GT_CNS_INT)
+ if (!testTree3->AsOp()->gtOp1->IsCnsIntOrI())
{
return nullptr;
}
@@ -1126,9 +1126,9 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees()
if (optReturnBlock)
{
// Update tree when m_b1 is BBJ_COND and m_b2 and m_b3 are GT_RETURN (BBJ_RETURN)
- t1Comp->AsOp()->gtOp2->AsIntCon()->gtIconVal = 0;
- m_testInfo1.testTree->gtOper = GT_RETURN;
- m_testInfo1.testTree->gtType = m_testInfo2.testTree->gtType;
+ t1Comp->AsOp()->gtOp2->AsIntCon()->SetIconValue(0);
+ m_testInfo1.testTree->gtOper = GT_RETURN;
+ m_testInfo1.testTree->gtType = m_testInfo2.testTree->gtType;
// Update the return count of flow graph
assert(m_comp->fgReturnCount >= 2);
@@ -1336,9 +1336,9 @@ bool OptBoolsDsc::optOptimizeBoolsReturnBlock(BasicBlock* b3)
genTreeOps foldOp;
genTreeOps cmpOp;
- ssize_t it1val = m_testInfo1.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal;
- ssize_t it2val = m_testInfo2.compTree->AsOp()->gtOp2->AsIntCon()->gtIconVal;
- ssize_t it3val = m_t3->AsOp()->gtOp1->AsIntCon()->gtIconVal;
+ ssize_t it1val = m_testInfo1.compTree->AsOp()->gtOp2->AsIntCon()->IconValue();
+ ssize_t it2val = m_testInfo2.compTree->AsOp()->gtOp2->AsIntCon()->IconValue();
+ ssize_t it3val = m_t3->AsOp()->gtOp1->AsIntCon()->IconValue();
if (m_c1->gtOper == GT_LCL_VAR && m_c2->gtOper == GT_LCL_VAR &&
m_c1->AsLclVarCommon()->GetLclNum() == m_c2->AsLclVarCommon()->GetLclNum())
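For context, the classic fold this return-block path enables (a sketch in plain C++, checked exhaustively over a small range):

#include <cassert>

int main()
{
    // Two compares against zero joined by && collapse to one compare of an OR.
    for (int x = -2; x <= 2; x++)
        for (int y = -2; y <= 2; y++)
            assert(((x == 0) && (y == 0)) == ((x | y) == 0));
    return 0;
}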
@@ -1540,7 +1540,7 @@ void OptBoolsDsc::optOptimizeBoolsGcStress()
// Comparand type is already checked, and we have const int, there is no harm
// morphing it into a TYP_I_IMPL.
- noway_assert(relop->AsOp()->gtOp2->gtOper == GT_CNS_INT);
+ noway_assert(relop->AsOp()->gtOp2->IsCnsIntOrI());
relop->AsOp()->gtOp2->gtType = TYP_I_IMPL;
// Recost/rethread the tree if necessary
@@ -1597,7 +1597,7 @@ GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest)
GenTree* opr1 = cond->AsOp()->gtOp1;
GenTree* opr2 = cond->AsOp()->gtOp2;
- if (opr2->gtOper != GT_CNS_INT)
+ if (!opr2->IsCnsIntOrI())
{
return nullptr;
}
@@ -1607,7 +1607,7 @@ GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest)
return nullptr;
}
- ssize_t ival2 = opr2->AsIntCon()->gtIconVal;
+ ssize_t ival2 = opr2->AsIntCon()->IconValue();
// Is the value a boolean?
// We can either have a boolean expression (marked GTF_BOOLEAN) or a constant 0/1.
@@ -1616,7 +1616,7 @@ GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest)
{
pOptTest->isBool = true;
}
- else if ((opr1->gtOper == GT_CNS_INT) && (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1)))
+ else if (opr1->IsCnsIntOrI() && (opr1->IsIntegralConst(0) || opr1->IsIntegralConst(1)))
{
pOptTest->isBool = true;
}
@@ -1629,7 +1629,7 @@ GenTree* OptBoolsDsc::optIsBoolComp(OptTestInfo* pOptTest)
if (pOptTest->isBool)
{
m_comp->gtReverseCond(cond);
- opr2->AsIntCon()->gtIconVal = 0;
+ opr2->AsIntCon()->SetIconValue(0);
}
else
{
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 355e0f198d5a0..2a7566354dd5d 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -760,7 +760,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT
}
optLoopTable[loopInd].lpFlags |= LPFLG_CONST_INIT;
- optLoopTable[loopInd].lpConstInit = (int)initValue->AsIntCon()->gtIconVal;
+ optLoopTable[loopInd].lpConstInit = (int)initValue->AsIntCon()->IconValue();
optLoopTable[loopInd].lpInitBlock = initBlock;
return true;
@@ -830,7 +830,7 @@ bool Compiler::optCheckIterInLoopTest(unsigned loopInd, GenTree* test, unsigned
iterOp->gtFlags |= GTF_VAR_ITERATOR;
// Check what type of limit we have - constant, variable or arr-len.
- if (limitOp->gtOper == GT_CNS_INT)
+ if (limitOp->IsCnsIntOrI())
{
optLoopTable[loopInd].lpFlags |= LPFLG_CONST_LIMIT;
if ((limitOp->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
@@ -922,7 +922,7 @@ unsigned Compiler::optIsLoopIncrTree(GenTree* incr)
// Increment should be by a const int.
// TODO-CQ: CLONE: allow variable increments.
- if ((incrVal->gtOper != GT_CNS_INT) || (incrVal->TypeGet() != TYP_INT))
+ if (!incrVal->IsCnsIntOrI() || (incrVal->TypeGet() != TYP_INT))
{
return BAD_VAR_NUM;
}
@@ -1029,7 +1029,7 @@ bool Compiler::optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTes
GenTree* opr2 = relop->AsOp()->gtOp2;
// Make sure we have jtrue (vtmp != 0)
- if ((relop->OperGet() == GT_NE) && (opr1->OperGet() == GT_LCL_VAR) && (opr2->OperGet() == GT_CNS_INT) &&
+ if ((relop->OperGet() == GT_NE) && (opr1->OperGet() == GT_LCL_VAR) && (opr2->IsCnsIntOrI()) &&
opr2->IsIntegralConst(0))
{
// Get the previous statement to get the def (rhs) of Vtmp to see
@@ -4261,13 +4261,13 @@ PhaseStatus Compiler::optUnrollLoops()
if (!init->OperIs(GT_STORE_LCL_VAR) ||
(init->AsLclVar()->GetLclNum() != lvar) ||
!init->AsLclVar()->Data()->IsCnsIntOrI() ||
- (init->AsLclVar()->Data()->AsIntCon()->gtIconVal != lbeg) ||
+ (init->AsLclVar()->Data()->AsIntCon()->IconValue() != lbeg) ||
!((incr->gtOper == GT_ADD) || (incr->gtOper == GT_SUB)) ||
(incr->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
(incr->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != lvar) ||
- (incr->AsOp()->gtOp2->gtOper != GT_CNS_INT) ||
- (incr->AsOp()->gtOp2->AsIntCon()->gtIconVal != iterInc) ||
+ !incr->AsOp()->gtOp2->IsCnsIntOrI() ||
+ (incr->AsOp()->gtOp2->AsIntCon()->IconValue() != iterInc) ||
(testStmt->GetRootNode()->gtOper != GT_JTRUE))
{
@@ -5733,62 +5733,10 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu
switch (oper)
{
/* Constants can usually be narrowed by changing their value */
- CLANG_FORMAT_COMMENT_ANCHOR;
-
-#ifndef TARGET_64BIT
- __int64 lval;
- __int64 lmask;
-
- case GT_CNS_LNG:
- lval = tree->AsIntConCommon()->LngValue();
- lmask = 0;
-
- switch (dstt)
- {
- case TYP_BYTE:
- lmask = 0x0000007F;
- break;
- case TYP_UBYTE:
- lmask = 0x000000FF;
- break;
- case TYP_SHORT:
- lmask = 0x00007FFF;
- break;
- case TYP_USHORT:
- lmask = 0x0000FFFF;
- break;
- case TYP_INT:
- lmask = 0x7FFFFFFF;
- break;
- case TYP_UINT:
- lmask = 0xFFFFFFFF;
- break;
-
- default:
- return false;
- }
-
- if ((lval & lmask) != lval)
- return false;
-
- if (doit)
- {
- tree->BashToConst(static_cast<int32_t>(lval));
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(tree);
- }
- }
-
- return true;
-#endif
-
case GT_CNS_INT:
-
- ssize_t ival;
- ival = tree->AsIntCon()->gtIconVal;
- ssize_t imask;
- imask = 0;
+ {
+ int64_t value = tree->AsIntCon()->IntegralValue();
+ int64_t imask = 0;
switch (dstt)
{
@@ -5804,36 +5752,30 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu
case TYP_USHORT:
imask = 0x0000FFFF;
break;
-#ifdef TARGET_64BIT
case TYP_INT:
imask = 0x7FFFFFFF;
break;
case TYP_UINT:
imask = 0xFFFFFFFF;
break;
-#endif // TARGET_64BIT
default:
return false;
}
- if ((ival & imask) != ival)
+ if ((value & imask) != value)
{
return false;
}
-#ifdef TARGET_64BIT
if (doit)
{
- tree->gtType = TYP_INT;
- tree->AsIntCon()->gtIconVal = (int)ival;
- if (vnStore != nullptr)
- {
- fgValueNumberTreeConst(tree);
- }
+ tree->gtType = TYP_INT;
+ tree->AsIntCon()->SetIconValue((int)value);
+ fgUpdateConstTreeValueNumber(tree);
}
-#endif // TARGET_64BIT
return true;
+ }
/* Operands that are in memory can usually be narrowed
simply by changing their gtType */
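The constant case above boils down to a mask test; a sketch with a hypothetical helper name, using the same masks as the switch:

#include <cassert>
#include <cstdint>

// A constant can be retyped to the destination iff masking changes nothing,
// e.g. imask == 0x7FFFFFFF for TYP_INT or 0x0000FFFF for TYP_USHORT.
bool fitsByMask(int64_t value, int64_t imask)
{
    return (value & imask) == value;
}

int main()
{
    assert(fitsByMask(42, 0x0000FFFF));
    assert(!fitsByMask(-1, 0x7FFFFFFF)); // negatives are conservatively rejected
    return 0;
}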
@@ -5879,7 +5821,7 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu
// If 'dstt' is unsigned and one of the operands can be narrowed into 'dsst',
// the result of the GT_AND will also fit into 'dstt' and can be narrowed.
// The same is true if one of the operands is an int const and can be narrowed into 'dsst'.
- if (!gtIsActiveCSE_Candidate(op2) && ((op2->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt)))
+ if (!gtIsActiveCSE_Candidate(op2) && ((op2->IsCnsIntOrI()) || varTypeIsUnsigned(dstt)))
{
if (optNarrowTree(op2, srct, dstt, NoVNPair, false))
{
@@ -5893,7 +5835,7 @@ bool Compiler::optNarrowTree(GenTree* tree, var_types srct, var_types dstt, Valu
}
if ((opToNarrow == nullptr) && !gtIsActiveCSE_Candidate(op1) &&
- ((op1->gtOper == GT_CNS_INT) || varTypeIsUnsigned(dstt)))
+ ((op1->IsCnsIntOrI()) || varTypeIsUnsigned(dstt)))
{
if (optNarrowTree(op1, srct, dstt, NoVNPair, false))
{
@@ -9002,7 +8944,7 @@ ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEB
assert(mul->gtOper == GT_MUL || mul->gtOper == GT_LSH);
assert(mul->AsOp()->gtOp2->IsCnsIntOrI());
- ssize_t scale = mul->AsOp()->gtOp2->AsIntConCommon()->IconValue();
+ ssize_t scale = mul->AsOp()->gtOp2->AsIntCon()->IconValue();
if (mul->gtOper == GT_LSH)
{
@@ -9017,7 +8959,7 @@ ssize_t Compiler::optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEB
// When index->gtOper is GT_MUL and index->AsOp()->gtOp2->gtOper is GT_CNS_INT (i.e. * 5),
// we can bump up the scale from 4 to 5*4, and then change index to index->AsOp()->gtOp1.
// Otherwise, we cannot optimize it. We will simply keep the original scale and index.
- scale *= index->AsOp()->gtOp2->AsIntConCommon()->IconValue();
+ scale *= index->AsOp()->gtOp2->AsIntCon()->IconValue();
index = index->AsOp()->gtOp1;
}
diff --git a/src/coreclr/jit/rangecheck.cpp b/src/coreclr/jit/rangecheck.cpp
index 89720f6c7748b..08dcdf13a1d14 100644
--- a/src/coreclr/jit/rangecheck.cpp
+++ b/src/coreclr/jit/rangecheck.cpp
@@ -434,7 +434,7 @@ bool RangeCheck::IsBinOpMonotonicallyIncreasing(GenTreeOp* binop)
return IsMonotonicallyIncreasing(op1, true) && IsMonotonicallyIncreasing(op2, true);
case GT_CNS_INT:
- if (op2->AsIntConCommon()->IconValue() < 0)
+ if (op2->AsIntCon()->IconValue() < 0)
{
JITDUMP("Not monotonically increasing because of encountered negative constant\n");
return false;
diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp
index 8eccc1a13208c..ca087b9c2009e 100644
--- a/src/coreclr/jit/rationalize.cpp
+++ b/src/coreclr/jit/rationalize.cpp
@@ -157,8 +157,8 @@ void Rationalizer::RewriteSubLshDiv(GenTree** use)
if (a->OperIs(GT_LCL_VAR) && cns->IsIntegralConstPow2() &&
op1->AsLclVar()->GetLclNum() == a->AsLclVar()->GetLclNum())
{
- size_t shiftValue = shift->AsIntConCommon()->IntegralValue();
- size_t cnsValue = cns->AsIntConCommon()->IntegralValue();
+ size_t shiftValue = shift->AsIntCon()->IntegralValue();
+ size_t cnsValue = cns->AsIntCon()->IntegralValue();
if ((cnsValue >> shiftValue) == 1)
{
node->ChangeOper(GT_MOD);
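A sketch of the identity RewriteSubLshDiv keys on, assuming c == 1 << n (multiplication stands in for the left shift to keep the C++ well-defined for negative quotients):

#include <cassert>
#include <cstdint>

int main()
{
    int64_t n = 3, c = int64_t(1) << n;
    for (int64_t a = -37; a <= 37; a++)
    {
        // a - ((a / c) << n) is a % c when c == 1 << n.
        assert(a - (a / c) * c == a % c);
    }
    return 0;
}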
diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp
index d69730ad520ed..84428c8dde703 100644
--- a/src/coreclr/jit/simd.cpp
+++ b/src/coreclr/jit/simd.cpp
@@ -613,8 +613,8 @@ bool Compiler::areArrayElementsContiguous(GenTree* op1, GenTree* op2)
GenTree* op1IndexNode = op1IndexAddr->Index();
GenTree* op2IndexNode = op2IndexAddr->Index();
- if ((op1IndexNode->OperGet() == GT_CNS_INT && op2IndexNode->OperGet() == GT_CNS_INT) &&
- (op1IndexNode->AsIntCon()->gtIconVal + 1 == op2IndexNode->AsIntCon()->gtIconVal))
+ if ((op1IndexNode->IsCnsIntOrI() && op2IndexNode->IsCnsIntOrI()) &&
+ (op1IndexNode->AsIntCon()->IconValue() + 1 == op2IndexNode->AsIntCon()->IconValue()))
{
if (op1ArrayRef->OperIs(GT_IND) && op2ArrayRef->OperIs(GT_IND))
{
@@ -718,7 +718,7 @@ GenTree* Compiler::CreateAddressNodeForSimdHWIntrinsicCreate(GenTree* tree, var_
GenTree* index = addr->AsIndexAddr()->Index();
assert(index->IsCnsIntOrI());
- unsigned indexVal = (unsigned)index->AsIntCon()->gtIconVal;
+ unsigned indexVal = (unsigned)index->AsIntCon()->IconValue();
unsigned offset = indexVal * genTypeSize(tree->TypeGet());
// Generate the boundary check exception.
diff --git a/src/coreclr/jit/simdcodegenxarch.cpp b/src/coreclr/jit/simdcodegenxarch.cpp
index d0fb1bf0aef4e..8d9997ed4800e 100644
--- a/src/coreclr/jit/simdcodegenxarch.cpp
+++ b/src/coreclr/jit/simdcodegenxarch.cpp
@@ -71,7 +71,7 @@ void CodeGen::genStoreIndTypeSimd12(GenTreeStoreInd* treeNode)
}
else if (addr->IsCnsIntOrI() && addr->isContained())
{
- GenTreeIntConCommon* icon = addr->AsIntConCommon();
+ GenTreeIntCon* icon = addr->AsIntCon();
assert(!icon->ImmedValNeedsReloc(compiler));
icon->SetIconValue(icon->IconValue() + 8);
}
@@ -152,7 +152,7 @@ void CodeGen::genLoadIndTypeSimd12(GenTreeIndir* treeNode)
}
else if (addr->IsCnsIntOrI() && addr->isContained())
{
- GenTreeIntConCommon* icon = addr->AsIntConCommon();
+ GenTreeIntCon* icon = addr->AsIntCon();
assert(!icon->ImmedValNeedsReloc(compiler));
icon->SetIconValue(icon->IconValue() + 8);
}
@@ -188,7 +188,7 @@ void CodeGen::genLoadIndTypeSimd12(GenTreeIndir* treeNode)
}
else if (addr->IsCnsIntOrI() && addr->isContained())
{
- GenTreeIntConCommon* icon = addr->AsIntConCommon();
+ GenTreeIntCon* icon = addr->AsIntCon();
icon->SetIconValue(icon->IconValue() - 8);
}
else
diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h
index ba044c7b9cc5d..3f55108a47d9b 100644
--- a/src/coreclr/jit/utils.h
+++ b/src/coreclr/jit/utils.h
@@ -1016,6 +1016,13 @@ bool FitsIn(var_types type, T value)
return FitsIn<int64_t>(value);
case TYP_ULONG:
return FitsIn<uint64_t>(value);
+ case TYP_REF:
+ case TYP_BYREF:
+#ifdef TARGET_64BIT
+ return FitsIn<uint64_t>(value);
+#else
+ return FitsIn<uint32_t>(value);
+#endif
default:
unreached();
}
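The new TYP_REF/TYP_BYREF arms are plain pointer-width range checks; C++20's std::in_range performs the same test as a FitsIn<T> call (a sketch, not the JIT's helper):

#include <cstdint>
#include <utility> // std::in_range (C++20)

static_assert(std::in_range<uint32_t>(INT64_C(0xFFFFFFFF)));   // fits a 32-bit pointer
static_assert(!std::in_range<uint32_t>(INT64_C(0x100000000))); // does not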
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index aea80a8ca48ed..0c61ad7a38b93 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -5480,7 +5480,7 @@ FieldSeq* ValueNumStore::FieldSeqVNToFieldSeq(ValueNum vn)
ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, GenTree* opB)
{
- if (opB->OperGet() == GT_CNS_INT)
+ if (opB->IsCnsIntOrI())
{
return ExtendPtrVN(opA, opB->AsIntCon()->gtFieldSeq, opB->AsIntCon()->IconValue());
}
@@ -10503,11 +10503,11 @@ void Compiler::fgValueNumberTreeConst(GenTree* tree)
}
else if ((typ == TYP_LONG) || (typ == TYP_ULONG))
{
- tree->gtVNPair.SetBoth(vnStore->VNForLongCon(INT64(tree->AsIntConCommon()->LngValue())));
+ tree->gtVNPair.SetBoth(vnStore->VNForLongCon(INT64(tree->AsIntCon()->LngValue())));
}
else
{
- tree->gtVNPair.SetBoth(vnStore->VNForIntCon(int(tree->AsIntConCommon()->IconValue())));
+ tree->gtVNPair.SetBoth(vnStore->VNForIntCon(int(tree->AsIntCon()->IconValue())));
}
if (tree->IsCnsIntOrI())
@@ -10580,7 +10580,7 @@ void Compiler::fgValueNumberTreeConst(GenTree* tree)
}
case TYP_REF:
- if (tree->AsIntConCommon()->IconValue() == 0)
+ if (tree->AsIntCon()->IconValue() == 0)
{
tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
}
@@ -10588,14 +10588,14 @@ void Compiler::fgValueNumberTreeConst(GenTree* tree)
{
assert(doesMethodHaveFrozenObjects());
tree->gtVNPair.SetBoth(
- vnStore->VNForHandle(ssize_t(tree->AsIntConCommon()->IconValue()), tree->GetIconHandleFlag()));
+ vnStore->VNForHandle(ssize_t(tree->AsIntCon()->IconValue()), tree->GetIconHandleFlag()));
fgValueNumberRegisterConstFieldSeq(tree->AsIntCon());
}
break;
case TYP_BYREF:
- if (tree->AsIntConCommon()->IconValue() == 0)
+ if (tree->AsIntCon()->IconValue() == 0)
{
tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
}
@@ -10606,13 +10606,13 @@ void Compiler::fgValueNumberTreeConst(GenTree* tree)
if (tree->IsIconHandle())
{
tree->gtVNPair.SetBoth(
- vnStore->VNForHandle(ssize_t(tree->AsIntConCommon()->IconValue()), tree->GetIconHandleFlag()));
+ vnStore->VNForHandle(ssize_t(tree->AsIntCon()->IconValue()), tree->GetIconHandleFlag()));
fgValueNumberRegisterConstFieldSeq(tree->AsIntCon());
}
else
{
- tree->gtVNPair.SetBoth(vnStore->VNForByrefCon((target_size_t)tree->AsIntConCommon()->IconValue()));
+ tree->gtVNPair.SetBoth(vnStore->VNForByrefCon((target_size_t)tree->AsIntCon()->IconValue()));
}
}
break;