GlobalISel/Utils: Refactor integer/float constant match functions
Rework getConstantVRegValWithLookThrough to make it clear whether we are
matching only an integer constant, only a float constant, or any constant
(the default).
Add helper functions that return both the DefVReg and the APInt/APFloat of
a constant instruction:
getIConstantVRegValWithLookThrough: integer constant, only G_CONSTANT
getFConstantVRegValWithLookThrough: float constant, only G_FCONSTANT
getAnyConstantVRegValWithLookThrough: either G_CONSTANT or G_FCONSTANT
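For reference, a minimal sketch of the new declarations; the authoritative
signatures live in llvm/include/llvm/CodeGen/GlobalISel/Utils.h, so treat
the parameter lists below as illustrative:

  // ValueAndVReg pairs an APInt with the defining vreg of the constant;
  // FPValueAndVReg pairs an APFloat with it.
  Optional<ValueAndVReg>
  getIConstantVRegValWithLookThrough(Register VReg,
                                     const MachineRegisterInfo &MRI,
                                     bool LookThroughInstrs = true);
  Optional<FPValueAndVReg>
  getFConstantVRegValWithLookThrough(Register VReg,
                                     const MachineRegisterInfo &MRI,
                                     bool LookThroughInstrs = true);
  Optional<ValueAndVReg>
  getAnyConstantVRegValWithLookThrough(Register VReg,
                                       const MachineRegisterInfo &MRI,
                                       bool LookThroughInstrs = true);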
Rename getConstantVRegVal and getConstantVRegSExtVal to getIConstantVRegVal
and getIConstantVRegSExtVal. These now match only G_CONSTANT, as described
in their comments.
Relevant matchers now return both the DefVReg and the APInt/APFloat value.
Replace existing uses of getConstantVRegValWithLookThrough and
getConstantVRegVal with the new helper functions. An any-constant match is
required only in:
ConstantFoldBinOp: for a constant argument that is a float-to-int bitcast
getAArch64VectorSplat: AArch64::G_DUP operands can be any constant
AMDGPU selection of G_BUILD_VECTOR_TRUNC: operands can be any constant
Everywhere else, use the integer-only constant match (a sketch of the
any-constant case follows).
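A minimal call-site sketch for the any-constant case; the helper name
matchSplatBits is hypothetical and not part of this patch:

  // Accept either G_CONSTANT or G_FCONSTANT as a splat source; a float
  // constant comes back as its IEEE bit pattern in the APInt.
  static Optional<APInt> matchSplatBits(Register Reg,
                                        const MachineRegisterInfo &MRI) {
    if (auto Cst = getAnyConstantVRegValWithLookThrough(Reg, MRI))
      return Cst->Value;
    return None;
  }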
Differential Revision: https://reviews.llvm.org/D104409
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 2c99c11..65cbbf2 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4075,9 +4075,7 @@
// If the index is a constant, we can really break this down as you would
// expect, and index into the target size pieces.
int64_t IdxVal;
- auto MaybeCst =
- getConstantVRegValWithLookThrough(Idx, MRI, /*LookThroughInstrs*/ true,
- /*HandleFConstants*/ false);
+ auto MaybeCst = getIConstantVRegValWithLookThrough(Idx, MRI);
if (MaybeCst) {
IdxVal = MaybeCst->Value.getSExtValue();
// Avoid out of bounds indexing the pieces.
@@ -4931,8 +4929,7 @@
const LLT HalfTy = LLT::scalar(NewBitSize);
const LLT CondTy = LLT::scalar(1);
- if (auto VRegAndVal =
- getConstantVRegValWithLookThrough(Amt, MRI, true, false)) {
+ if (auto VRegAndVal = getIConstantVRegValWithLookThrough(Amt, MRI)) {
return narrowScalarShiftByConstant(MI, VRegAndVal->Value, HalfTy,
ShiftAmtTy);
}
@@ -7536,7 +7533,7 @@
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
MachineRegisterInfo &MRI = *MIB.getMRI();
unsigned NumBits = Ty.getScalarSizeInBits();
- auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
+ auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
if (!Ty.isVector() && ValVRegAndVal) {
APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8);
APInt SplatVal = APInt::getSplat(NumBits, Scalar);
@@ -7590,7 +7587,7 @@
const auto &DstMMO = **MI.memoperands_begin();
MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
- auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
+ auto ValVRegAndVal = getIConstantVRegValWithLookThrough(Val, MRI);
bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
if (!findGISelOptimalMemOpLowering(MemOps, Limit,
@@ -7691,7 +7688,7 @@
bool IsVolatile = MemOp->isVolatile();
// See if this is a constant length copy
- auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
+ auto LenVRegAndVal = getIConstantVRegValWithLookThrough(Len, MRI);
// FIXME: support dynamically sized G_MEMCPY_INLINE
assert(LenVRegAndVal.hasValue() &&
"inline memcpy with dynamic size is not yet supported");
@@ -7954,7 +7951,7 @@
}
// See if this is a constant length copy
- auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
+ auto LenVRegAndVal = getIConstantVRegValWithLookThrough(Len, MRI);
if (!LenVRegAndVal)
return UnableToLegalize;
uint64_t KnownLen = LenVRegAndVal->Value.getZExtValue();