GlobalISel: Introduce GenericMachineInstr classes and derivatives for idiomatic LLVM RTTI.
This adds some level of type safety, allows helper functions to be added for
specific opcodes for free, and also allows us to succinctly check for class
membership with the usual dyn_cast/isa/cast functions.
To start off with, add variants for the different load/store operations, with some
places updated to use them.
Differential Revision: https://reviews.llvm.org/D105751
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 354cecf..7b4a957 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -893,53 +893,53 @@
case TargetOpcode::G_INSERT:
return narrowScalarInsert(MI, TypeIdx, NarrowTy);
case TargetOpcode::G_LOAD: {
- auto &MMO = **MI.memoperands_begin();
- Register DstReg = MI.getOperand(0).getReg();
+ auto &LoadMI = cast<GLoad>(MI);
+ Register DstReg = LoadMI.getDstReg();
LLT DstTy = MRI.getType(DstReg);
if (DstTy.isVector())
return UnableToLegalize;
- if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
+ if (8 * LoadMI.getMemSize() != DstTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
- MIRBuilder.buildLoad(TmpReg, MI.getOperand(1), MMO);
+ MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO());
MIRBuilder.buildAnyExt(DstReg, TmpReg);
- MI.eraseFromParent();
+ LoadMI.eraseFromParent();
return Legalized;
}
- return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
+ return reduceLoadStoreWidth(LoadMI, TypeIdx, NarrowTy);
}
case TargetOpcode::G_ZEXTLOAD:
case TargetOpcode::G_SEXTLOAD: {
- bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD;
- Register DstReg = MI.getOperand(0).getReg();
- Register PtrReg = MI.getOperand(1).getReg();
+ auto &LoadMI = cast<GExtLoad>(MI);
+ Register DstReg = LoadMI.getDstReg();
+ Register PtrReg = LoadMI.getPointerReg();
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
- auto &MMO = **MI.memoperands_begin();
+ auto &MMO = LoadMI.getMMO();
unsigned MemSize = MMO.getSizeInBits();
if (MemSize == NarrowSize) {
MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
} else if (MemSize < NarrowSize) {
- MIRBuilder.buildLoadInstr(MI.getOpcode(), TmpReg, PtrReg, MMO);
+ MIRBuilder.buildLoadInstr(LoadMI.getOpcode(), TmpReg, PtrReg, MMO);
} else if (MemSize > NarrowSize) {
// FIXME: Need to split the load.
return UnableToLegalize;
}
- if (ZExt)
+ if (isa<GZExtLoad>(LoadMI))
MIRBuilder.buildZExt(DstReg, TmpReg);
else
MIRBuilder.buildSExt(DstReg, TmpReg);
- MI.eraseFromParent();
+ LoadMI.eraseFromParent();
return Legalized;
}
case TargetOpcode::G_STORE: {
- const auto &MMO = **MI.memoperands_begin();
+ auto &StoreMI = cast<GStore>(MI);
- Register SrcReg = MI.getOperand(0).getReg();
+ Register SrcReg = StoreMI.getValueReg();
LLT SrcTy = MRI.getType(SrcReg);
if (SrcTy.isVector())
return UnableToLegalize;
@@ -950,16 +950,15 @@
if (SrcTy.isVector() && LeftoverBits != 0)
return UnableToLegalize;
- if (8 * MMO.getSize() != SrcTy.getSizeInBits()) {
+ if (8 * StoreMI.getMemSize() != SrcTy.getSizeInBits()) {
Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
- auto &MMO = **MI.memoperands_begin();
MIRBuilder.buildTrunc(TmpReg, SrcReg);
- MIRBuilder.buildStore(TmpReg, MI.getOperand(1), MMO);
- MI.eraseFromParent();
+ MIRBuilder.buildStore(TmpReg, StoreMI.getPointerReg(), StoreMI.getMMO());
+ StoreMI.eraseFromParent();
return Legalized;
}
- return reduceLoadStoreWidth(MI, 0, NarrowTy);
+ return reduceLoadStoreWidth(StoreMI, 0, NarrowTy);
}
case TargetOpcode::G_SELECT:
return narrowScalarSelect(MI, TypeIdx, NarrowTy);
@@ -2797,13 +2796,12 @@
return UnableToLegalize;
}
-LegalizerHelper::LegalizeResult
-LegalizerHelper::lowerLoad(MachineInstr &MI) {
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
// Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
- Register DstReg = MI.getOperand(0).getReg();
- Register PtrReg = MI.getOperand(1).getReg();
+ Register DstReg = LoadMI.getDstReg();
+ Register PtrReg = LoadMI.getPointerReg();
LLT DstTy = MRI.getType(DstReg);
- MachineMemOperand &MMO = **MI.memoperands_begin();
+ MachineMemOperand &MMO = LoadMI.getMMO();
LLT MemTy = MMO.getMemoryType();
MachineFunction &MF = MIRBuilder.getMF();
if (MemTy.isVector())
@@ -2829,11 +2827,10 @@
LoadReg = MRI.createGenericVirtualRegister(WideMemTy);
}
- if (MI.getOpcode() == TargetOpcode::G_SEXTLOAD) {
+ if (isa<GSExtLoad>(LoadMI)) {
auto NewLoad = MIRBuilder.buildLoad(LoadTy, PtrReg, *NewMMO);
MIRBuilder.buildSExtInReg(LoadReg, NewLoad, MemSizeInBits);
- } else if (MI.getOpcode() == TargetOpcode::G_ZEXTLOAD ||
- WideMemTy == DstTy) {
+ } else if (isa<GZExtLoad>(LoadMI) || WideMemTy == DstTy) {
auto NewLoad = MIRBuilder.buildLoad(LoadTy, PtrReg, *NewMMO);
// The extra bits are guaranteed to be zero, since we stored them that
// way. A zext load from Wide thus automatically gives zext from MemVT.
@@ -2845,7 +2842,7 @@
if (DstTy != LoadTy)
MIRBuilder.buildTrunc(DstReg, LoadReg);
- MI.eraseFromParent();
+ LoadMI.eraseFromParent();
return Legalized;
}
@@ -2875,23 +2872,22 @@
uint64_t SmallSplitSize = MemSizeInBits - LargeSplitSize;
MachineMemOperand *LargeMMO =
- MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
- MachineMemOperand *SmallMMO = MF.getMachineMemOperand(
- &MMO, LargeSplitSize / 8, SmallSplitSize / 8);
+ MF.getMachineMemOperand(&MMO, 0, LargeSplitSize / 8);
+ MachineMemOperand *SmallMMO =
+ MF.getMachineMemOperand(&MMO, LargeSplitSize / 8, SmallSplitSize / 8);
LLT PtrTy = MRI.getType(PtrReg);
unsigned AnyExtSize = PowerOf2Ceil(DstTy.getSizeInBits());
LLT AnyExtTy = LLT::scalar(AnyExtSize);
- auto LargeLoad = MIRBuilder.buildLoadInstr(
- TargetOpcode::G_ZEXTLOAD, AnyExtTy, PtrReg, *LargeMMO);
+ auto LargeLoad = MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, AnyExtTy,
+ PtrReg, *LargeMMO);
- auto OffsetCst = MIRBuilder.buildConstant(
- LLT::scalar(PtrTy.getSizeInBits()), LargeSplitSize / 8);
+ auto OffsetCst = MIRBuilder.buildConstant(LLT::scalar(PtrTy.getSizeInBits()),
+ LargeSplitSize / 8);
Register PtrAddReg = MRI.createGenericVirtualRegister(PtrTy);
- auto SmallPtr =
- MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst);
- auto SmallLoad = MIRBuilder.buildLoadInstr(
- MI.getOpcode(), AnyExtTy, SmallPtr, *SmallMMO);
+ auto SmallPtr = MIRBuilder.buildPtrAdd(PtrAddReg, PtrReg, OffsetCst);
+ auto SmallLoad = MIRBuilder.buildLoadInstr(LoadMI.getOpcode(), AnyExtTy,
+ SmallPtr, *SmallMMO);
auto ShiftAmt = MIRBuilder.buildConstant(AnyExtTy, LargeSplitSize);
auto Shift = MIRBuilder.buildShl(AnyExtTy, SmallLoad, ShiftAmt);
@@ -2903,23 +2899,22 @@
MIRBuilder.buildTrunc(DstReg, {Or});
}
- MI.eraseFromParent();
+ LoadMI.eraseFromParent();
return Legalized;
}
-LegalizerHelper::LegalizeResult
-LegalizerHelper::lowerStore(MachineInstr &MI) {
+LegalizerHelper::LegalizeResult LegalizerHelper::lowerStore(GStore &StoreMI) {
// Lower a non-power of 2 store into multiple pow-2 stores.
// E.g. split an i24 store into an i16 store + i8 store.
// We do this by first extending the stored value to the next largest power
// of 2 type, and then using truncating stores to store the components.
// By doing this, likewise with G_LOAD, generate an extend that can be
// artifact-combined away instead of leaving behind extracts.
- Register SrcReg = MI.getOperand(0).getReg();
- Register PtrReg = MI.getOperand(1).getReg();
+ Register SrcReg = StoreMI.getValueReg();
+ Register PtrReg = StoreMI.getPointerReg();
LLT SrcTy = MRI.getType(SrcReg);
MachineFunction &MF = MIRBuilder.getMF();
- MachineMemOperand &MMO = **MI.memoperands_begin();
+ MachineMemOperand &MMO = **StoreMI.memoperands_begin();
LLT MemTy = MMO.getMemoryType();
if (SrcTy.isVector())
@@ -2945,7 +2940,7 @@
MachineMemOperand *NewMMO =
MF.getMachineMemOperand(&MMO, MMO.getPointerInfo(), WideTy);
MIRBuilder.buildStore(ZextInReg, PtrReg, *NewMMO);
- MI.eraseFromParent();
+ StoreMI.eraseFromParent();
return Legalized;
}
@@ -2976,7 +2971,7 @@
MF.getMachineMemOperand(&MMO, LargeSplitSize / 8, SmallSplitSize / 8);
MIRBuilder.buildStore(ExtVal, PtrReg, *LargeMMO);
MIRBuilder.buildStore(SmallVal, SmallPtr, *SmallMMO);
- MI.eraseFromParent();
+ StoreMI.eraseFromParent();
return Legalized;
}
@@ -3167,9 +3162,9 @@
case TargetOpcode::G_LOAD:
case TargetOpcode::G_SEXTLOAD:
case TargetOpcode::G_ZEXTLOAD:
- return lowerLoad(MI);
+ return lowerLoad(cast<GAnyLoad>(MI));
case TargetOpcode::G_STORE:
- return lowerStore(MI);
+ return lowerStore(cast<GStore>(MI));
case TargetOpcode::G_CTLZ_ZERO_UNDEF:
case TargetOpcode::G_CTTZ_ZERO_UNDEF:
case TargetOpcode::G_CTLZ:
@@ -3966,26 +3961,24 @@
}
LegalizerHelper::LegalizeResult
-LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
+LegalizerHelper::reduceLoadStoreWidth(GLoadStore &LdStMI, unsigned TypeIdx,
LLT NarrowTy) {
// FIXME: Don't know how to handle secondary types yet.
if (TypeIdx != 0)
return UnableToLegalize;
- MachineMemOperand *MMO = *MI.memoperands_begin();
-
// This implementation doesn't work for atomics. Give up instead of doing
// something invalid.
- if (MMO->isAtomic())
+ if (LdStMI.isAtomic())
return UnableToLegalize;
- bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
- Register ValReg = MI.getOperand(0).getReg();
- Register AddrReg = MI.getOperand(1).getReg();
+ bool IsLoad = isa<GLoad>(LdStMI);
+ Register ValReg = LdStMI.getReg(0);
+ Register AddrReg = LdStMI.getPointerReg();
LLT ValTy = MRI.getType(ValReg);
// FIXME: Do we need a distinct NarrowMemory legalize action?
- if (ValTy.getSizeInBits() != 8 * MMO->getSize()) {
+ if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize()) {
LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
return UnableToLegalize;
}
@@ -4016,6 +4009,7 @@
// is a load, return the new registers in ValRegs. For a store, each elements
// of ValRegs should be PartTy. Returns the next offset that needs to be
// handled.
+ auto MMO = LdStMI.getMMO();
auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
unsigned Offset) -> unsigned {
MachineFunction &MF = MIRBuilder.getMF();
@@ -4028,7 +4022,7 @@
MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
MachineMemOperand *NewMMO =
- MF.getMachineMemOperand(MMO, ByteOffset, PartTy);
+ MF.getMachineMemOperand(&MMO, ByteOffset, PartTy);
if (IsLoad) {
Register Dst = MRI.createGenericVirtualRegister(PartTy);
@@ -4053,7 +4047,7 @@
LeftoverTy, NarrowLeftoverRegs);
}
- MI.eraseFromParent();
+ LdStMI.eraseFromParent();
return Legalized;
}
@@ -4327,7 +4321,7 @@
return fewerElementsVectorExtractInsertVectorElt(MI, TypeIdx, NarrowTy);
case G_LOAD:
case G_STORE:
- return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
+ return reduceLoadStoreWidth(cast<GLoadStore>(MI), TypeIdx, NarrowTy);
case G_SEXT_INREG:
return fewerElementsVectorSextInReg(MI, TypeIdx, NarrowTy);
GISEL_VECREDUCE_CASES_NONSEQ