Ensure that Sse3.MoveAndDuplicate correctly tracks supporting SIMD scalar loads #97783

Merged · 1 commit merged on Jan 31, 2024
47 changes: 33 additions & 14 deletions src/coreclr/jit/lowerxarch.cpp
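The diff below changes Lowering::IsContainableHWIntrinsicOp so that NI_SSE3_MoveAndDuplicate gets its own containment case, one that reports supportsSIMDScalarLoads = true instead of sharing the ConvertToVector128Double path. Per the comments in the diff, the hardware justification is that MOVDDUP's memory form reads only mem64, half the vector width, so neither alignment nor the upper 8 bytes of the source matter. The following is a minimal C++ sketch of that behavior using the standard SSE3 intrinsics; it is illustrative only and not part of this PR.

    #include <pmmintrin.h> // SSE3
    #include <cstdio>

    int main()
    {
        // An arbitrary, possibly unaligned, 8-byte location; the memory form of
        // MOVDDUP reads exactly these 8 bytes and nothing more.
        double value = 3.5;

        __m128d dup = _mm_loaddup_pd(&value); // movddup xmm, qword ptr [mem]

        double lanes[2];
        _mm_storeu_pd(lanes, dup);
        std::printf("%f %f\n", lanes[0], lanes[1]); // prints 3.500000 3.500000
        return 0;
    }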
@@ -7866,16 +7866,16 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
case NI_AVX512F_BroadcastVector128ToVector512:
case NI_AVX512F_BroadcastVector256ToVector512:
{
assert(!supportsSIMDScalarLoads);

if (parentNode->OperIsMemoryLoad())
{
supportsGeneralLoads = !childNode->OperIsHWIntrinsic();
break;
}
else
{
supportsGeneralLoads = true;
break;
}

supportsGeneralLoads = true;
break;
}

case NI_SSE41_ConvertToVector128Int16:
@@ -7941,26 +7941,47 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
}

case NI_SSE2_ConvertToVector128Double:
case NI_SSE3_MoveAndDuplicate:
case NI_AVX_ConvertToVector256Double:
case NI_AVX512F_ConvertToVector512Double:
case NI_AVX512F_VL_ConvertToVector128Double:
case NI_AVX512F_VL_ConvertToVector256Double:
{
assert(!supportsSIMDScalarLoads);

// Most instructions under the non-VEX encoding require aligned operands.
// Those used for Sse2.ConvertToVector128Double (CVTDQ2PD and CVTPS2PD)
// and Sse3.MoveAndDuplicate (MOVDDUP) are exceptions and don't fail for
// unaligned inputs as they read mem64 (half the vector width) instead
// are exceptions and don't fail for unaligned inputs as they read half
// the vector width instead

supportsAlignedSIMDLoads = !comp->opts.MinOpts();
supportsUnalignedSIMDLoads = true;

const unsigned expectedSize = genTypeSize(parentNode->TypeGet()) / 2;
const unsigned operandSize = genTypeSize(childNode->TypeGet());

// For broadcasts we can only optimize constants and memory operands
const bool broadcastIsContainable = childNode->OperIsConst() || childNode->isMemoryOp();
supportsGeneralLoads =
broadcastIsContainable && supportsUnalignedSIMDLoads && (operandSize >= expectedSize);
if (childNode->OperIsConst() || childNode->isMemoryOp())
{
// For broadcasts we can only optimize constants and memory operands
// since we're going from a smaller base type to a larger base type
supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize);
}
break;
}

case NI_SSE3_MoveAndDuplicate:
{
// Most instructions under the non-VEX encoding require aligned operands.
// Those used for Sse3.MoveAndDuplicate (MOVDDUP) are exceptions and don't
// fail for unaligned inputs as they read half the vector width instead

supportsAlignedSIMDLoads = !comp->opts.MinOpts();
supportsUnalignedSIMDLoads = true;

const unsigned expectedSize = genTypeSize(parentNode->TypeGet()) / 2;
const unsigned operandSize = genTypeSize(childNode->TypeGet());

supportsGeneralLoads = supportsUnalignedSIMDLoads && (operandSize >= expectedSize);
supportsSIMDScalarLoads = true;
break;
}

@@ -7986,8 +8007,6 @@ bool Lowering::IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* parentNode, GenTre
break;
}
}

assert(supportsSIMDScalarLoads == false);
break;
}
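
The new NI_SSE3_MoveAndDuplicate case above is the substance of the fix: MoveAndDuplicate only consumes the low 64 bits of its source, so an 8-byte SIMD scalar load can be folded into MOVDDUP's memory operand without changing the result, and the containment logic may now report supportsSIMDScalarLoads = true for it. A hedged C++ sketch of that equivalence, again using the standard SSE2/SSE3 intrinsics rather than anything from this PR:

    #include <pmmintrin.h> // SSE3 (also pulls in SSE2's emmintrin.h)
    #include <cassert>
    #include <cstring>

    int main()
    {
        double value = 1.25;

        // Unfolded form: an 8-byte scalar load (movsd) followed by a register-form movddup.
        __m128d scalar  = _mm_load_sd(&value);    // low 8 bytes = value, upper 8 bytes = 0
        __m128d viaRegs = _mm_movedup_pd(scalar); // duplicate the low 8 bytes

        // Folded form: movddup with a memory operand, reading the same 8 bytes directly.
        __m128d viaMem = _mm_loaddup_pd(&value);

        // Both forms produce identical bit patterns.
        assert(std::memcmp(&viaRegs, &viaMem, sizeof(__m128d)) == 0);
        return 0;
    }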
