| //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| /// \file |
| /// This file implements the targeting of the InstructionSelector class for |
| /// AMDGPU. |
| /// \todo This should be generated by TableGen. |
| //===----------------------------------------------------------------------===// |
| |
| #include "AMDGPUInstructionSelector.h" |
| #include "AMDGPU.h" |
| #include "AMDGPUGlobalISelUtils.h" |
| #include "AMDGPUInstrInfo.h" |
| #include "AMDGPURegisterBankInfo.h" |
| #include "AMDGPUTargetMachine.h" |
| #include "SIMachineFunctionInfo.h" |
| #include "Utils/AMDGPUBaseInfo.h" |
| #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" |
| #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" |
| #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" |
| #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" |
| #include "llvm/IR/DiagnosticInfo.h" |
| #include "llvm/IR/IntrinsicsAMDGPU.h" |
| |
| #define DEBUG_TYPE "amdgpu-isel" |
| |
| using namespace llvm; |
| using namespace MIPatternMatch; |
| |
| static cl::opt<bool> AllowRiskySelect( |
| "amdgpu-global-isel-risky-select", |
| cl::desc("Allow GlobalISel to select cases that are likely to not work yet"), |
| cl::init(false), |
| cl::ReallyHidden); |
| |
| #define GET_GLOBALISEL_IMPL |
| #define AMDGPUSubtarget GCNSubtarget |
| #include "AMDGPUGenGlobalISel.inc" |
| #undef GET_GLOBALISEL_IMPL |
| #undef AMDGPUSubtarget |
| |
| AMDGPUInstructionSelector::AMDGPUInstructionSelector( |
| const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI, |
| const AMDGPUTargetMachine &TM) |
| : InstructionSelector(), TII(*STI.getInstrInfo()), |
| TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM), |
| STI(STI), |
| EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG), |
| #define GET_GLOBALISEL_PREDICATES_INIT |
| #include "AMDGPUGenGlobalISel.inc" |
| #undef GET_GLOBALISEL_PREDICATES_INIT |
| #define GET_GLOBALISEL_TEMPORARIES_INIT |
| #include "AMDGPUGenGlobalISel.inc" |
| #undef GET_GLOBALISEL_TEMPORARIES_INIT |
| { |
| } |
| |
| const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; } |
| |
| void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB, |
| CodeGenCoverage &CoverageInfo, |
| ProfileSummaryInfo *PSI, |
| BlockFrequencyInfo *BFI) { |
| MRI = &MF.getRegInfo(); |
| Subtarget = &MF.getSubtarget<GCNSubtarget>(); |
| InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI); |
| } |
| |
| bool AMDGPUInstructionSelector::isVCC(Register Reg, |
| const MachineRegisterInfo &MRI) const { |
| // The verifier is oblivious to s1 being a valid value for wavesize registers. |
| if (Reg.isPhysical()) |
| return false; |
| |
| auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); |
| const TargetRegisterClass *RC = |
| RegClassOrBank.dyn_cast<const TargetRegisterClass*>(); |
| if (RC) { |
| const LLT Ty = MRI.getType(Reg); |
| return RC->hasSuperClassEq(TRI.getBoolRC()) && |
| Ty.isValid() && Ty.getSizeInBits() == 1; |
| } |
| |
| const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>(); |
| return RB->getID() == AMDGPU::VCCRegBankID; |
| } |
| |
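| // Turn a copy-like intrinsic (wqm, softwqm, strict_wwm, strict_wqm) into its |
| // pseudo opcode, add the implicit exec use, and constrain the source and |
| // destination to a common register class. |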
| bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI, |
| unsigned NewOpc) const { |
| MI.setDesc(TII.get(NewOpc)); |
| MI.RemoveOperand(1); // Remove intrinsic ID. |
| MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); |
| |
| MachineOperand &Dst = MI.getOperand(0); |
| MachineOperand &Src = MI.getOperand(1); |
| |
| // TODO: This should be legalized to s32 if needed |
| if (MRI->getType(Dst.getReg()) == LLT::scalar(1)) |
| return false; |
| |
| const TargetRegisterClass *DstRC |
| = TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
| const TargetRegisterClass *SrcRC |
| = TRI.getConstrainedRegClassForOperand(Src, *MRI); |
| if (!DstRC || DstRC != SrcRC) |
| return false; |
| |
| return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) && |
| RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const { |
| const DebugLoc &DL = I.getDebugLoc(); |
| MachineBasicBlock *BB = I.getParent(); |
| I.setDesc(TII.get(TargetOpcode::COPY)); |
| |
| const MachineOperand &Src = I.getOperand(1); |
| MachineOperand &Dst = I.getOperand(0); |
| Register DstReg = Dst.getReg(); |
| Register SrcReg = Src.getReg(); |
| |
| if (isVCC(DstReg, *MRI)) { |
| if (SrcReg == AMDGPU::SCC) { |
| const TargetRegisterClass *RC |
| = TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
| if (!RC) |
| return true; |
| return RBI.constrainGenericRegister(DstReg, *RC, *MRI); |
| } |
| |
| if (!isVCC(SrcReg, *MRI)) { |
| // TODO: Should probably leave the copy and let copyPhysReg expand it. |
| if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI)) |
| return false; |
| |
| const TargetRegisterClass *SrcRC |
| = TRI.getConstrainedRegClassForOperand(Src, *MRI); |
| |
| Optional<ValueAndVReg> ConstVal = |
| getIConstantVRegValWithLookThrough(SrcReg, *MRI, true); |
| if (ConstVal) { |
| unsigned MovOpc = |
| STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; |
| BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg) |
| .addImm(ConstVal->Value.getBoolValue() ? -1 : 0); |
| } else { |
| Register MaskedReg = MRI->createVirtualRegister(SrcRC); |
| |
| // We can't trust the high bits at this point, so clear them. |
| |
| // TODO: Skip masking high bits if def is known boolean. |
| |
| unsigned AndOpc = |
| TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; |
| BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg) |
| .addImm(1) |
| .addReg(SrcReg); |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg) |
| .addImm(0) |
| .addReg(MaskedReg); |
| } |
| |
| if (!MRI->getRegClassOrNull(SrcReg)) |
| MRI->setRegClass(SrcReg, SrcRC); |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| const TargetRegisterClass *RC = |
| TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
| if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) |
| return false; |
| |
| return true; |
| } |
| |
| for (const MachineOperand &MO : I.operands()) { |
| if (MO.getReg().isPhysical()) |
| continue; |
| |
| const TargetRegisterClass *RC = |
| TRI.getConstrainedRegClassForOperand(MO, *MRI); |
| if (!RC) |
| continue; |
| RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI); |
| } |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const { |
| const Register DefReg = I.getOperand(0).getReg(); |
| const LLT DefTy = MRI->getType(DefReg); |
| if (DefTy == LLT::scalar(1)) { |
| if (!AllowRiskySelect) { |
| LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n"); |
| return false; |
| } |
| |
| LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n"); |
| } |
| |
| // TODO: Verify this doesn't have insane operands (e.g. a VGPR to SGPR copy) |
| |
| const RegClassOrRegBank &RegClassOrBank = |
| MRI->getRegClassOrRegBank(DefReg); |
| |
| const TargetRegisterClass *DefRC |
| = RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); |
| if (!DefRC) { |
| if (!DefTy.isValid()) { |
| LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n"); |
| return false; |
| } |
| |
| const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>(); |
| DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI); |
| if (!DefRC) { |
| LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n"); |
| return false; |
| } |
| } |
| |
| // TODO: Verify that all registers have the same bank |
| I.setDesc(TII.get(TargetOpcode::PHI)); |
| return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI); |
| } |
| |
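| // Return the 32-bit half of a 64-bit operand selected by SubIdx (sub0 = low, |
| // sub1 = high). Register operands are extracted with a COPY; immediate |
| // operands are split into their low/high 32 bits. |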
| MachineOperand |
| AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO, |
| const TargetRegisterClass &SubRC, |
| unsigned SubIdx) const { |
| |
| MachineInstr *MI = MO.getParent(); |
| MachineBasicBlock *BB = MO.getParent()->getParent(); |
| Register DstReg = MRI->createVirtualRegister(&SubRC); |
| |
| if (MO.isReg()) { |
| unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx); |
| Register Reg = MO.getReg(); |
| BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) |
| .addReg(Reg, 0, ComposedSubIdx); |
| |
| return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(), |
| MO.isKill(), MO.isDead(), MO.isUndef(), |
| MO.isEarlyClobber(), 0, MO.isDebug(), |
| MO.isInternalRead()); |
| } |
| |
| assert(MO.isImm()); |
| |
| APInt Imm(64, MO.getImm()); |
| |
| switch (SubIdx) { |
| default: |
| llvm_unreachable("do not know to split immediate with this sub index."); |
| case AMDGPU::sub0: |
| return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue()); |
| case AMDGPU::sub1: |
| return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue()); |
| } |
| } |
| |
| static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) { |
| switch (Opc) { |
| case AMDGPU::G_AND: |
| return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; |
| case AMDGPU::G_OR: |
| return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32; |
| case AMDGPU::G_XOR: |
| return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32; |
| default: |
| llvm_unreachable("not a bit op"); |
| } |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const { |
| Register DstReg = I.getOperand(0).getReg(); |
| unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); |
| |
| const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
| if (DstRB->getID() != AMDGPU::SGPRRegBankID && |
| DstRB->getID() != AMDGPU::VCCRegBankID) |
| return false; |
| |
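| // Boolean (VCC bank) results live in the wave mask registers, so wave64 |
| // needs the 64-bit opcodes even though the value is conceptually 1 bit. |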
| bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID && |
| STI.isWave64()); |
| I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64))); |
| |
| // Dead implicit-def of scc |
| I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef |
| true, // isImp |
| false, // isKill |
| true)); // isDead |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const { |
| MachineBasicBlock *BB = I.getParent(); |
| MachineFunction *MF = BB->getParent(); |
| Register DstReg = I.getOperand(0).getReg(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| LLT Ty = MRI->getType(DstReg); |
| if (Ty.isVector()) |
| return false; |
| |
| unsigned Size = Ty.getSizeInBits(); |
| const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
| const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID; |
| const bool Sub = I.getOpcode() == TargetOpcode::G_SUB; |
| |
| if (Size == 32) { |
| if (IsSALU) { |
| const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32; |
| MachineInstr *Add = |
| BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) |
| .add(I.getOperand(1)) |
| .add(I.getOperand(2)); |
| I.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); |
| } |
| |
| if (STI.hasAddNoCarry()) { |
| const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64; |
| I.setDesc(TII.get(Opc)); |
| I.addOperand(*MF, MachineOperand::CreateImm(0)); |
| I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64; |
| |
| Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass()); |
| MachineInstr *Add |
| = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) |
| .addDef(UnusedCarry, RegState::Dead) |
| .add(I.getOperand(1)) |
| .add(I.getOperand(2)) |
| .addImm(0); |
| I.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); |
| } |
| |
| assert(!Sub && "illegal sub should not reach here"); |
| |
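| // 64-bit add: split both sources into 32-bit halves, add the low halves, |
| // chain the carry into the high halves, and recombine with a REG_SEQUENCE. |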
| const TargetRegisterClass &RC |
| = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass; |
| const TargetRegisterClass &HalfRC |
| = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass; |
| |
| MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0)); |
| MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0)); |
| MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1)); |
| MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1)); |
| |
| Register DstLo = MRI->createVirtualRegister(&HalfRC); |
| Register DstHi = MRI->createVirtualRegister(&HalfRC); |
| |
| if (IsSALU) { |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo) |
| .add(Lo1) |
| .add(Lo2); |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi) |
| .add(Hi1) |
| .add(Hi2); |
| } else { |
| const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass(); |
| Register CarryReg = MRI->createVirtualRegister(CarryRC); |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo) |
| .addDef(CarryReg) |
| .add(Lo1) |
| .add(Lo2) |
| .addImm(0); |
| MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi) |
| .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead) |
| .add(Hi1) |
| .add(Hi2) |
| .addReg(CarryReg, RegState::Kill) |
| .addImm(0); |
| |
| if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI)) |
| return false; |
| } |
| |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
| .addReg(DstLo) |
| .addImm(AMDGPU::sub0) |
| .addReg(DstHi) |
| .addImm(AMDGPU::sub1); |
| |
| if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) |
| return false; |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE( |
| MachineInstr &I) const { |
| MachineBasicBlock *BB = I.getParent(); |
| MachineFunction *MF = BB->getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| Register Dst0Reg = I.getOperand(0).getReg(); |
| Register Dst1Reg = I.getOperand(1).getReg(); |
| const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO || |
| I.getOpcode() == AMDGPU::G_UADDE; |
| const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE || |
| I.getOpcode() == AMDGPU::G_USUBE; |
| |
| if (isVCC(Dst1Reg, *MRI)) { |
| unsigned NoCarryOpc = |
| IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; |
| unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; |
| I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc)); |
| I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); |
| I.addOperand(*MF, MachineOperand::CreateImm(0)); |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
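| // Scalar path: the carry in/out is modeled through SCC, so copy any carry-in |
| // into SCC first and copy the resulting SCC back out as the carry-out. |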
| Register Src0Reg = I.getOperand(2).getReg(); |
| Register Src1Reg = I.getOperand(3).getReg(); |
| |
| if (HasCarryIn) { |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) |
| .addReg(I.getOperand(4).getReg()); |
| } |
| |
| unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; |
| unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; |
| |
| BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg) |
| .add(I.getOperand(2)) |
| .add(I.getOperand(3)); |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg) |
| .addReg(AMDGPU::SCC); |
| |
| if (!MRI->getRegClassOrNull(Dst1Reg)) |
| MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass); |
| |
| if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) || |
| !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) || |
| !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI)) |
| return false; |
| |
| if (HasCarryIn && |
| !RBI.constrainGenericRegister(I.getOperand(4).getReg(), |
| AMDGPU::SReg_32RegClass, *MRI)) |
| return false; |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| // TODO: We should probably legalize these to use only 32-bit results. |
| bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const { |
| MachineBasicBlock *BB = I.getParent(); |
| Register DstReg = I.getOperand(0).getReg(); |
| Register SrcReg = I.getOperand(1).getReg(); |
| LLT DstTy = MRI->getType(DstReg); |
| LLT SrcTy = MRI->getType(SrcReg); |
| const unsigned SrcSize = SrcTy.getSizeInBits(); |
| unsigned DstSize = DstTy.getSizeInBits(); |
| |
| // TODO: Should handle any multiple of 32 offset. |
| unsigned Offset = I.getOperand(2).getImm(); |
| if (Offset % 32 != 0 || DstSize > 128) |
| return false; |
| |
| // 16-bit operations really use 32-bit registers. |
| // FIXME: Probably should not allow 16-bit G_EXTRACT results. |
| if (DstSize == 16) |
| DstSize = 32; |
| |
| const TargetRegisterClass *DstRC = |
| TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI); |
| if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) |
| return false; |
| |
| const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); |
| const TargetRegisterClass *SrcRC = |
| TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI); |
| if (!SrcRC) |
| return false; |
| unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32, |
| DstSize / 32); |
| SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg); |
| if (!SrcRC) |
| return false; |
| |
| SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I, |
| *SrcRC, I.getOperand(1)); |
| const DebugLoc &DL = I.getDebugLoc(); |
| BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg) |
| .addReg(SrcReg, 0, SubReg); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const { |
| MachineBasicBlock *BB = MI.getParent(); |
| Register DstReg = MI.getOperand(0).getReg(); |
| LLT DstTy = MRI->getType(DstReg); |
| LLT SrcTy = MRI->getType(MI.getOperand(1).getReg()); |
| |
| const unsigned SrcSize = SrcTy.getSizeInBits(); |
| if (SrcSize < 32) |
| return selectImpl(MI, *CoverageInfo); |
| |
| const DebugLoc &DL = MI.getDebugLoc(); |
| const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
| const unsigned DstSize = DstTy.getSizeInBits(); |
| const TargetRegisterClass *DstRC = |
| TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); |
| if (!DstRC) |
| return false; |
| |
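| // Build the merged value with a REG_SEQUENCE, assigning each source to the |
| // subregister index that covers its piece of the destination. |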
| ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8); |
| MachineInstrBuilder MIB = |
| BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg); |
| for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) { |
| MachineOperand &Src = MI.getOperand(I + 1); |
| MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef())); |
| MIB.addImm(SubRegs[I]); |
| |
| const TargetRegisterClass *SrcRC |
| = TRI.getConstrainedRegClassForOperand(Src, *MRI); |
| if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI)) |
| return false; |
| } |
| |
| if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) |
| return false; |
| |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const { |
| MachineBasicBlock *BB = MI.getParent(); |
| const int NumDst = MI.getNumOperands() - 1; |
| |
| MachineOperand &Src = MI.getOperand(NumDst); |
| |
| Register SrcReg = Src.getReg(); |
| Register DstReg0 = MI.getOperand(0).getReg(); |
| LLT DstTy = MRI->getType(DstReg0); |
| LLT SrcTy = MRI->getType(SrcReg); |
| |
| const unsigned DstSize = DstTy.getSizeInBits(); |
| const unsigned SrcSize = SrcTy.getSizeInBits(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); |
| |
| const TargetRegisterClass *SrcRC = |
| TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI); |
| if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) |
| return false; |
| |
| // Note we could have mixed SGPR and VGPR destination banks for an SGPR |
| // source, and this relies on the fact that the same subregister indices are |
| // used for both. |
| ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8); |
| for (int I = 0, E = NumDst; I != E; ++I) { |
| MachineOperand &Dst = MI.getOperand(I); |
| BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg()) |
| .addReg(SrcReg, 0, SubRegs[I]); |
| |
| // Make sure the subregister index is valid for the source register. |
| SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]); |
| if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) |
| return false; |
| |
| const TargetRegisterClass *DstRC = |
| TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
| if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI)) |
| return false; |
| } |
| |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC( |
| MachineInstr &MI) const { |
| if (selectImpl(MI, *CoverageInfo)) |
| return true; |
| |
| const LLT S32 = LLT::scalar(32); |
| const LLT V2S16 = LLT::fixed_vector(2, 16); |
| |
| Register Dst = MI.getOperand(0).getReg(); |
| if (MRI->getType(Dst) != V2S16) |
| return false; |
| |
| const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI); |
| if (DstBank->getID() != AMDGPU::SGPRRegBankID) |
| return false; |
| |
| Register Src0 = MI.getOperand(1).getReg(); |
| Register Src1 = MI.getOperand(2).getReg(); |
| if (MRI->getType(Src0) != S32) |
| return false; |
| |
| const DebugLoc &DL = MI.getDebugLoc(); |
| MachineBasicBlock *BB = MI.getParent(); |
| |
| auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true); |
| if (ConstSrc1) { |
| auto ConstSrc0 = |
| getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true); |
| if (ConstSrc0) { |
| const int64_t K0 = ConstSrc0->Value.getSExtValue(); |
| const int64_t K1 = ConstSrc1->Value.getSExtValue(); |
| uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff; |
| uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff; |
| |
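| // Both sources are constants; fold them into a single packed 32-bit |
| // immediate. |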
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst) |
| .addImm(Lo16 | (Hi16 << 16)); |
| MI.eraseFromParent(); |
| return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); |
| } |
| } |
| |
| // TODO: This should probably be a combine somewhere |
| // (build_vector_trunc $src0, undef) -> copy $src0 |
| MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI); |
| if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) { |
| MI.setDesc(TII.get(AMDGPU::COPY)); |
| MI.RemoveOperand(2); |
| return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) && |
| RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI); |
| } |
| |
| Register ShiftSrc0; |
| Register ShiftSrc1; |
| |
| // With multiple uses of the shift, this will duplicate the shift and |
| // increase register pressure. |
| // |
| // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)) |
| // => (S_PACK_HH_B32_B16 $src0, $src1) |
| // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16)) |
| // => (S_PACK_LH_B32_B16 $src0, $src1) |
| // (build_vector_trunc $src0, $src1) |
| // => (S_PACK_LL_B32_B16 $src0, $src1) |
| |
| bool Shift0 = mi_match( |
| Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16)))); |
| |
| bool Shift1 = mi_match( |
| Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16)))); |
| |
| unsigned Opc = AMDGPU::S_PACK_LL_B32_B16; |
| if (Shift0 && Shift1) { |
| Opc = AMDGPU::S_PACK_HH_B32_B16; |
| MI.getOperand(1).setReg(ShiftSrc0); |
| MI.getOperand(2).setReg(ShiftSrc1); |
| } else if (Shift1) { |
| Opc = AMDGPU::S_PACK_LH_B32_B16; |
| MI.getOperand(2).setReg(ShiftSrc1); |
| } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) { |
| // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16 |
| auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst) |
| .addReg(ShiftSrc0) |
| .addImm(16); |
| |
| MI.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| } |
| |
| MI.setDesc(TII.get(Opc)); |
| return constrainSelectedInstRegOperands(MI, TII, TRI, RBI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const { |
| return selectG_ADD_SUB(I); |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const { |
| const MachineOperand &MO = I.getOperand(0); |
| |
| // FIXME: Interface for getConstrainedRegClassForOperand needs work. The |
| // regbank check here is to know why getConstrainedRegClassForOperand failed. |
| const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI); |
| if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) || |
| (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) { |
| I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const { |
| MachineBasicBlock *BB = I.getParent(); |
| |
| Register DstReg = I.getOperand(0).getReg(); |
| Register Src0Reg = I.getOperand(1).getReg(); |
| Register Src1Reg = I.getOperand(2).getReg(); |
| LLT Src1Ty = MRI->getType(Src1Reg); |
| |
| unsigned DstSize = MRI->getType(DstReg).getSizeInBits(); |
| unsigned InsSize = Src1Ty.getSizeInBits(); |
| |
| int64_t Offset = I.getOperand(3).getImm(); |
| |
| // FIXME: These cases should have been illegal and unnecessary to check here. |
| if (Offset % 32 != 0 || InsSize % 32 != 0) |
| return false; |
| |
| // Currently not handled by getSubRegFromChannel. |
| if (InsSize > 128) |
| return false; |
| |
| unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32); |
| if (SubReg == AMDGPU::NoSubRegister) |
| return false; |
| |
| const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
| const TargetRegisterClass *DstRC = |
| TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); |
| if (!DstRC) |
| return false; |
| |
| const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI); |
| const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI); |
| const TargetRegisterClass *Src0RC = |
| TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI); |
| const TargetRegisterClass *Src1RC = |
| TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI); |
| |
| // Deal with weird cases where the class only partially supports the subreg |
| // index. |
| Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg); |
| if (!Src0RC || !Src1RC) |
| return false; |
| |
| if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || |
| !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) || |
| !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI)) |
| return false; |
| |
| const DebugLoc &DL = I.getDebugLoc(); |
| BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg) |
| .addReg(Src0Reg) |
| .addReg(Src1Reg) |
| .addImm(SubReg); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const { |
| Register DstReg = MI.getOperand(0).getReg(); |
| Register SrcReg = MI.getOperand(1).getReg(); |
| Register OffsetReg = MI.getOperand(2).getReg(); |
| Register WidthReg = MI.getOperand(3).getReg(); |
| |
| assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID && |
| "scalar BFX instructions are expanded in regbankselect"); |
| assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 && |
| "64-bit vector BFX instructions are expanded in regbankselect"); |
| |
| const DebugLoc &DL = MI.getDebugLoc(); |
| MachineBasicBlock *MBB = MI.getParent(); |
| |
| bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX; |
| unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; |
| auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg) |
| .addReg(SrcReg) |
| .addReg(OffsetReg) |
| .addReg(WidthReg); |
| MI.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const { |
| if (STI.getLDSBankCount() != 16) |
| return selectImpl(MI, *CoverageInfo); |
| |
| Register Dst = MI.getOperand(0).getReg(); |
| Register Src0 = MI.getOperand(2).getReg(); |
| Register M0Val = MI.getOperand(6).getReg(); |
| if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) || |
| !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) || |
| !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) |
| return false; |
| |
| // This requires 2 instructions. It is possible to write a pattern to support |
| // this, but the generated isel emitter doesn't correctly deal with multiple |
| // output instructions using the same physical register input. The copy to m0 |
| // is incorrectly placed before the second instruction. |
| // |
| // TODO: Match source modifiers. |
| |
| Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| MachineBasicBlock *MBB = MI.getParent(); |
| |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
| .addReg(M0Val); |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov) |
| .addImm(2) |
| .addImm(MI.getOperand(4).getImm()) // $attr |
| .addImm(MI.getOperand(3).getImm()); // $attrchan |
| |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst) |
| .addImm(0) // $src0_modifiers |
| .addReg(Src0) // $src0 |
| .addImm(MI.getOperand(4).getImm()) // $attr |
| .addImm(MI.getOperand(3).getImm()) // $attrchan |
| .addImm(0) // $src2_modifiers |
| .addReg(InterpMov) // $src2 - 2 f16 values selected by high |
| .addImm(MI.getOperand(5).getImm()) // $high |
| .addImm(0) // $clamp |
| .addImm(0); // $omod |
| |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| // Writelane is special in that it can use SGPR and M0 (which would normally |
| // count as using the constant bus twice - but in this case it is allowed since |
| // the lane selector doesn't count as a use of the constant bus). However, it is |
| // still required to abide by the 1 SGPR rule. Fix this up if we might have |
| // multiple SGPRs. |
| bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const { |
| // With a constant bus limit of at least 2, there's no issue. |
| if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1) |
| return selectImpl(MI, *CoverageInfo); |
| |
| MachineBasicBlock *MBB = MI.getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| Register VDst = MI.getOperand(0).getReg(); |
| Register Val = MI.getOperand(2).getReg(); |
| Register LaneSelect = MI.getOperand(3).getReg(); |
| Register VDstIn = MI.getOperand(4).getReg(); |
| |
| auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst); |
| |
| Optional<ValueAndVReg> ConstSelect = |
| getIConstantVRegValWithLookThrough(LaneSelect, *MRI); |
| if (ConstSelect) { |
| // The selector has to be an inline immediate, so we can use whatever for |
| // the other operands. |
| MIB.addReg(Val); |
| MIB.addImm(ConstSelect->Value.getSExtValue() & |
| maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2())); |
| } else { |
| Optional<ValueAndVReg> ConstVal = |
| getIConstantVRegValWithLookThrough(Val, *MRI); |
| |
| // If the value written is an inline immediate, we can get away without a |
| // copy to m0. |
| if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(), |
| STI.hasInv2PiInlineImm())) { |
| MIB.addImm(ConstVal->Value.getSExtValue()); |
| MIB.addReg(LaneSelect); |
| } else { |
| MIB.addReg(Val); |
| |
| // If the lane selector was originally in a VGPR and copied with |
| // readfirstlane, there's a hazard to read the same SGPR from the |
| // VALU. Constrain to a different SGPR to help avoid needing a nop later. |
| RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI); |
| |
| BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
| .addReg(LaneSelect); |
| MIB.addReg(AMDGPU::M0); |
| } |
| } |
| |
| MIB.addReg(VDstIn); |
| |
| MI.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| } |
| |
| // We need to handle this here because tablegen doesn't support matching |
| // instructions with multiple outputs. |
| bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const { |
| Register Dst0 = MI.getOperand(0).getReg(); |
| Register Dst1 = MI.getOperand(1).getReg(); |
| |
| LLT Ty = MRI->getType(Dst0); |
| unsigned Opc; |
| if (Ty == LLT::scalar(32)) |
| Opc = AMDGPU::V_DIV_SCALE_F32_e64; |
| else if (Ty == LLT::scalar(64)) |
| Opc = AMDGPU::V_DIV_SCALE_F64_e64; |
| else |
| return false; |
| |
| // TODO: Match source modifiers. |
| |
| const DebugLoc &DL = MI.getDebugLoc(); |
| MachineBasicBlock *MBB = MI.getParent(); |
| |
| Register Numer = MI.getOperand(3).getReg(); |
| Register Denom = MI.getOperand(4).getReg(); |
| unsigned ChooseDenom = MI.getOperand(5).getImm(); |
| |
| Register Src0 = ChooseDenom != 0 ? Numer : Denom; |
| |
| auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0) |
| .addDef(Dst1) |
| .addImm(0) // $src0_modifiers |
| .addUse(Src0) // $src0 |
| .addImm(0) // $src1_modifiers |
| .addUse(Denom) // $src1 |
| .addImm(0) // $src2_modifiers |
| .addUse(Numer) // $src2 |
| .addImm(0) // $clamp |
| .addImm(0); // $omod |
| |
| MI.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const { |
| unsigned IntrinsicID = I.getIntrinsicID(); |
| switch (IntrinsicID) { |
| case Intrinsic::amdgcn_if_break: { |
| MachineBasicBlock *BB = I.getParent(); |
| |
| // FIXME: Manually selecting to avoid dealing with the SReg_1 trick |
| // SelectionDAG uses for wave32 vs wave64. |
| BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK)) |
| .add(I.getOperand(0)) |
| .add(I.getOperand(2)) |
| .add(I.getOperand(3)); |
| |
| Register DstReg = I.getOperand(0).getReg(); |
| Register Src0Reg = I.getOperand(2).getReg(); |
| Register Src1Reg = I.getOperand(3).getReg(); |
| |
| I.eraseFromParent(); |
| |
| for (Register Reg : { DstReg, Src0Reg, Src1Reg }) |
| MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); |
| |
| return true; |
| } |
| case Intrinsic::amdgcn_interp_p1_f16: |
| return selectInterpP1F16(I); |
| case Intrinsic::amdgcn_wqm: |
| return constrainCopyLikeIntrin(I, AMDGPU::WQM); |
| case Intrinsic::amdgcn_softwqm: |
| return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM); |
| case Intrinsic::amdgcn_strict_wwm: |
| case Intrinsic::amdgcn_wwm: |
| return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM); |
| case Intrinsic::amdgcn_strict_wqm: |
| return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM); |
| case Intrinsic::amdgcn_writelane: |
| return selectWritelane(I); |
| case Intrinsic::amdgcn_div_scale: |
| return selectDivScale(I); |
| case Intrinsic::amdgcn_icmp: |
| return selectIntrinsicIcmp(I); |
| case Intrinsic::amdgcn_ballot: |
| return selectBallot(I); |
| case Intrinsic::amdgcn_reloc_constant: |
| return selectRelocConstant(I); |
| case Intrinsic::amdgcn_groupstaticsize: |
| return selectGroupStaticSize(I); |
| case Intrinsic::returnaddress: |
| return selectReturnAddress(I); |
| default: |
| return selectImpl(I, *CoverageInfo); |
| } |
| } |
| |
| static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) { |
| if (Size != 32 && Size != 64) |
| return -1; |
| switch (P) { |
| default: |
| llvm_unreachable("Unknown condition code!"); |
| case CmpInst::ICMP_NE: |
| return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64; |
| case CmpInst::ICMP_EQ: |
| return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64; |
| case CmpInst::ICMP_SGT: |
| return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64; |
| case CmpInst::ICMP_SGE: |
| return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64; |
| case CmpInst::ICMP_SLT: |
| return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64; |
| case CmpInst::ICMP_SLE: |
| return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64; |
| case CmpInst::ICMP_UGT: |
| return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64; |
| case CmpInst::ICMP_UGE: |
| return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64; |
| case CmpInst::ICMP_ULT: |
| return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64; |
| case CmpInst::ICMP_ULE: |
| return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64; |
| } |
| } |
| |
| int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P, |
| unsigned Size) const { |
| if (Size == 64) { |
| if (!STI.hasScalarCompareEq64()) |
| return -1; |
| |
| switch (P) { |
| case CmpInst::ICMP_NE: |
| return AMDGPU::S_CMP_LG_U64; |
| case CmpInst::ICMP_EQ: |
| return AMDGPU::S_CMP_EQ_U64; |
| default: |
| return -1; |
| } |
| } |
| |
| if (Size != 32) |
| return -1; |
| |
| switch (P) { |
| case CmpInst::ICMP_NE: |
| return AMDGPU::S_CMP_LG_U32; |
| case CmpInst::ICMP_EQ: |
| return AMDGPU::S_CMP_EQ_U32; |
| case CmpInst::ICMP_SGT: |
| return AMDGPU::S_CMP_GT_I32; |
| case CmpInst::ICMP_SGE: |
| return AMDGPU::S_CMP_GE_I32; |
| case CmpInst::ICMP_SLT: |
| return AMDGPU::S_CMP_LT_I32; |
| case CmpInst::ICMP_SLE: |
| return AMDGPU::S_CMP_LE_I32; |
| case CmpInst::ICMP_UGT: |
| return AMDGPU::S_CMP_GT_U32; |
| case CmpInst::ICMP_UGE: |
| return AMDGPU::S_CMP_GE_U32; |
| case CmpInst::ICMP_ULT: |
| return AMDGPU::S_CMP_LT_U32; |
| case CmpInst::ICMP_ULE: |
| return AMDGPU::S_CMP_LE_U32; |
| default: |
| llvm_unreachable("Unknown condition code!"); |
| } |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const { |
| MachineBasicBlock *BB = I.getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| |
| Register SrcReg = I.getOperand(2).getReg(); |
| unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); |
| |
| auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate(); |
| |
| Register CCReg = I.getOperand(0).getReg(); |
| if (!isVCC(CCReg, *MRI)) { |
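| // Scalar compare: the result is produced in SCC and copied out to the |
| // 32-bit SGPR destination. |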
| int Opcode = getS_CMPOpcode(Pred, Size); |
| if (Opcode == -1) |
| return false; |
| MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode)) |
| .add(I.getOperand(2)) |
| .add(I.getOperand(3)); |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg) |
| .addReg(AMDGPU::SCC); |
| bool Ret = |
| constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) && |
| RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI); |
| I.eraseFromParent(); |
| return Ret; |
| } |
| |
| int Opcode = getV_CMPOpcode(Pred, Size); |
| if (Opcode == -1) |
| return false; |
| |
| MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), |
| I.getOperand(0).getReg()) |
| .add(I.getOperand(2)) |
| .add(I.getOperand(3)); |
| RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), |
| *TRI.getBoolRC(), *MRI); |
| bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); |
| I.eraseFromParent(); |
| return Ret; |
| } |
| |
| bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const { |
| Register Dst = I.getOperand(0).getReg(); |
| if (isVCC(Dst, *MRI)) |
| return false; |
| |
| if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize()) |
| return false; |
| |
| MachineBasicBlock *BB = I.getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| Register SrcReg = I.getOperand(2).getReg(); |
| unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); |
| auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm()); |
| |
| int Opcode = getV_CMPOpcode(Pred, Size); |
| if (Opcode == -1) |
| return false; |
| |
| MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst) |
| .add(I.getOperand(2)) |
| .add(I.getOperand(3)); |
| RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(), |
| *MRI); |
| bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); |
| I.eraseFromParent(); |
| return Ret; |
| } |
| |
| bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const { |
| MachineBasicBlock *BB = I.getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| Register DstReg = I.getOperand(0).getReg(); |
| const unsigned Size = MRI->getType(DstReg).getSizeInBits(); |
| const bool Is64 = Size == 64; |
| |
| if (Size != STI.getWavefrontSize()) |
| return false; |
| |
| Optional<ValueAndVReg> Arg = |
| getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI); |
| |
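| // Fold a constant argument: ballot(false) is 0 and ballot(true) is the full |
| // exec mask. |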
| if (Arg.hasValue()) { |
| const int64_t Value = Arg.getValue().Value.getSExtValue(); |
| if (Value == 0) { |
| unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; |
| BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0); |
| } else if (Value == -1) { // all ones |
| Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO; |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); |
| } else |
| return false; |
| } else { |
| Register SrcReg = I.getOperand(2).getReg(); |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); |
| } |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const { |
| Register DstReg = I.getOperand(0).getReg(); |
| const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
| const TargetRegisterClass *DstRC = |
| TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI); |
| if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) |
| return false; |
| |
| const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID; |
| |
| Module *M = MF->getFunction().getParent(); |
| const MDNode *Metadata = I.getOperand(2).getMetadata(); |
| auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); |
| auto RelocSymbol = cast<GlobalVariable>( |
| M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); |
| |
| MachineBasicBlock *BB = I.getParent(); |
| BuildMI(*BB, &I, I.getDebugLoc(), |
| TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg) |
| .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO); |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const { |
| Triple::OSType OS = MF->getTarget().getTargetTriple().getOS(); |
| |
| Register DstReg = I.getOperand(0).getReg(); |
| const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
| unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ? |
| AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; |
| |
| MachineBasicBlock *MBB = I.getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| |
| auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg); |
| |
| if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) { |
| const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); |
| MIB.addImm(MFI->getLDSSize()); |
| } else { |
| Module *M = MF->getFunction().getParent(); |
| const GlobalValue *GV |
| = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize); |
| MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO); |
| } |
| |
| I.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const { |
| MachineBasicBlock *MBB = I.getParent(); |
| MachineFunction &MF = *MBB->getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| |
| MachineOperand &Dst = I.getOperand(0); |
| Register DstReg = Dst.getReg(); |
| unsigned Depth = I.getOperand(2).getImm(); |
| |
| const TargetRegisterClass *RC |
| = TRI.getConstrainedRegClassForOperand(Dst, *MRI); |
| if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) || |
| !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) |
| return false; |
| |
| // Check for kernel and shader functions |
| if (Depth != 0 || |
| MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) { |
| BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) |
| .addImm(0); |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| MachineFrameInfo &MFI = MF.getFrameInfo(); |
| // There is a call to @llvm.returnaddress in this function |
| MFI.setReturnAddressIsTaken(true); |
| |
| // Get the return address reg and mark it as an implicit live-in |
| Register ReturnAddrReg = TRI.getReturnAddressReg(MF); |
| Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg, |
| AMDGPU::SReg_64RegClass); |
| BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg) |
| .addReg(LiveIn); |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const { |
| // FIXME: Manually selecting to avoid dealing with the SReg_1 trick |
| // SelectionDAG uses for wave32 vs wave64. |
| MachineBasicBlock *BB = MI.getParent(); |
| BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF)) |
| .add(MI.getOperand(1)); |
| |
| Register Reg = MI.getOperand(1).getReg(); |
| MI.eraseFromParent(); |
| |
| if (!MRI->getRegClassOrNull(Reg)) |
| MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic( |
| MachineInstr &MI, Intrinsic::ID IntrID) const { |
| MachineBasicBlock *MBB = MI.getParent(); |
| MachineFunction *MF = MBB->getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| |
| unsigned IndexOperand = MI.getOperand(7).getImm(); |
| bool WaveRelease = MI.getOperand(8).getImm() != 0; |
| bool WaveDone = MI.getOperand(9).getImm() != 0; |
| |
| if (WaveDone && !WaveRelease) |
| report_fatal_error("ds_ordered_count: wave_done requires wave_release"); |
| |
| unsigned OrderedCountIndex = IndexOperand & 0x3f; |
| IndexOperand &= ~0x3f; |
| unsigned CountDw = 0; |
| |
| if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) { |
| CountDw = (IndexOperand >> 24) & 0xf; |
| IndexOperand &= ~(0xf << 24); |
| |
| if (CountDw < 1 || CountDw > 4) { |
| report_fatal_error( |
| "ds_ordered_count: dword count must be between 1 and 4"); |
| } |
| } |
| |
| if (IndexOperand) |
| report_fatal_error("ds_ordered_count: bad index operand"); |
| |
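| // Pack the DS_ORDERED_COUNT offset field: offset0 holds the ordered-count |
| // index times 4; offset1 holds wave_release (bit 0), wave_done (bit 1), the |
| // shader type (bits 2-3), the instruction (bit 4) and, on GFX10+, the dword |
| // count minus 1 (bits 6-7). |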
| unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; |
| unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF); |
| |
| unsigned Offset0 = OrderedCountIndex << 2; |
| unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | |
| (Instruction << 4); |
| |
| if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) |
| Offset1 |= (CountDw - 1) << 6; |
| |
| unsigned Offset = Offset0 | (Offset1 << 8); |
| |
| Register M0Val = MI.getOperand(2).getReg(); |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
| .addReg(M0Val); |
| |
| Register DstReg = MI.getOperand(0).getReg(); |
| Register ValReg = MI.getOperand(3).getReg(); |
| MachineInstrBuilder DS = |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg) |
| .addReg(ValReg) |
| .addImm(Offset) |
| .cloneMemRefs(MI); |
| |
| if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI)) |
| return false; |
| |
| bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI); |
| MI.eraseFromParent(); |
| return Ret; |
| } |
| |
| static unsigned gwsIntrinToOpcode(unsigned IntrID) { |
| switch (IntrID) { |
| case Intrinsic::amdgcn_ds_gws_init: |
| return AMDGPU::DS_GWS_INIT; |
| case Intrinsic::amdgcn_ds_gws_barrier: |
| return AMDGPU::DS_GWS_BARRIER; |
| case Intrinsic::amdgcn_ds_gws_sema_v: |
| return AMDGPU::DS_GWS_SEMA_V; |
| case Intrinsic::amdgcn_ds_gws_sema_br: |
| return AMDGPU::DS_GWS_SEMA_BR; |
| case Intrinsic::amdgcn_ds_gws_sema_p: |
| return AMDGPU::DS_GWS_SEMA_P; |
| case Intrinsic::amdgcn_ds_gws_sema_release_all: |
| return AMDGPU::DS_GWS_SEMA_RELEASE_ALL; |
| default: |
| llvm_unreachable("not a gws intrinsic"); |
| } |
| } |
| |
| bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI, |
| Intrinsic::ID IID) const { |
| if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all && |
| !STI.hasGWSSemaReleaseAll()) |
| return false; |
| |
| // intrinsic ID, vsrc, offset |
| const bool HasVSrc = MI.getNumOperands() == 3; |
| assert(HasVSrc || MI.getNumOperands() == 2); |
| |
| Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg(); |
| const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI); |
| if (OffsetRB->getID() != AMDGPU::SGPRRegBankID) |
| return false; |
| |
| MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); |
| assert(OffsetDef); |
| |
| unsigned ImmOffset; |
| |
| MachineBasicBlock *MBB = MI.getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| |
| MachineInstr *Readfirstlane = nullptr; |
| |
| // If we legalized the VGPR input, strip out the readfirstlane to analyze the |
| // incoming offset, in case there's an add of a constant. We'll have to put it |
| // back later. |
| if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) { |
| Readfirstlane = OffsetDef; |
| BaseOffset = OffsetDef->getOperand(1).getReg(); |
| OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); |
| } |
| |
| if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) { |
| // If we have a constant offset, try to use the 0 in m0 as the base. |
| // TODO: Look into changing the default m0 initialization value. If the |
| // default -1 only set the low 16-bits, we could leave it as-is and add 1 to |
| // the immediate offset. |
| |
| ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue(); |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
| .addImm(0); |
| } else { |
| std::tie(BaseOffset, ImmOffset) = |
| AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset); |
| |
| if (Readfirstlane) { |
| // We have the constant offset now, so put the readfirstlane back on the |
| // variable component. |
| if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI)) |
| return false; |
| |
| Readfirstlane->getOperand(1).setReg(BaseOffset); |
| BaseOffset = Readfirstlane->getOperand(0).getReg(); |
| } else { |
| if (!RBI.constrainGenericRegister(BaseOffset, |
| AMDGPU::SReg_32RegClass, *MRI)) |
| return false; |
| } |
| |
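| // The hardware adds M0[21:16] to the offset field, so shift the variable |
| // part of the offset into that bit range before copying it to m0. |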
| Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base) |
| .addReg(BaseOffset) |
| .addImm(16); |
| |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
| .addReg(M0Base); |
| } |
| |
| // The resource id offset is computed as (<isa opaque base> + M0[21:16] + |
| // offset field) % 64. Some versions of the programming guide omit the m0 |
| // part, or claim it's from offset 0. |
| auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID))); |
| |
| if (HasVSrc) { |
| Register VSrc = MI.getOperand(1).getReg(); |
| |
| if (STI.needsAlignedVGPRs()) { |
| // Add implicit aligned super-reg to force alignment on the data operand. |
| Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef); |
| Register NewVR = |
| MRI->createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass); |
| BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), NewVR) |
| .addReg(VSrc, 0, MI.getOperand(1).getSubReg()) |
| .addImm(AMDGPU::sub0) |
| .addReg(Undef) |
| .addImm(AMDGPU::sub1); |
| MIB.addReg(NewVR, 0, AMDGPU::sub0); |
| MIB.addReg(NewVR, RegState::Implicit); |
| } else { |
| MIB.addReg(VSrc); |
| } |
| |
| if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI)) |
| return false; |
| } |
| |
| MIB.addImm(ImmOffset) |
| .cloneMemRefs(MI); |
| |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI, |
| bool IsAppend) const { |
| Register PtrBase = MI.getOperand(2).getReg(); |
| LLT PtrTy = MRI->getType(PtrBase); |
| bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS; |
| |
| unsigned Offset; |
| std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2)); |
| |
| // TODO: Should this try to look through readfirstlane like GWS? |
| if (!isDSOffsetLegal(PtrBase, Offset)) { |
| PtrBase = MI.getOperand(2).getReg(); |
| Offset = 0; |
| } |
| |
| MachineBasicBlock *MBB = MI.getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME; |
| |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) |
| .addReg(PtrBase); |
| if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI)) |
| return false; |
| |
| auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg()) |
| .addImm(Offset) |
| .addImm(IsGDS ? -1 : 0) |
| .cloneMemRefs(MI); |
| MI.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const { |
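| // When optimizing, a barrier for a workgroup that fits in a single wave is |
| // a no-op; a WAVE_BARRIER suffices to keep the scheduler from reordering |
| // around it. |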
| if (TM.getOptLevel() > CodeGenOpt::None) { |
| unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second; |
| if (WGSize <= STI.getWavefrontSize()) { |
| MachineBasicBlock *MBB = MI.getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER)); |
| MI.eraseFromParent(); |
| return true; |
| } |
| } |
| return selectImpl(MI, *CoverageInfo); |
| } |
| |
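| // Decode the TFE (bit 0) and LWE (bit 1) flags from the texfailctrl operand |
| // and note whether any tex-fail handling was requested. Returns false if |
| // unknown bits are set. |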
| static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE, |
| bool &IsTexFail) { |
| if (TexFailCtrl) |
| IsTexFail = true; |
| |
| TFE = (TexFailCtrl & 0x1) ? 1 : 0; |
| TexFailCtrl &= ~(uint64_t)0x1; |
| LWE = (TexFailCtrl & 0x2) ? 1 : 0; |
| TexFailCtrl &= ~(uint64_t)0x2; |
| |
| return TexFailCtrl == 0; |
| } |
| |
| bool AMDGPUInstructionSelector::selectImageIntrinsic( |
| MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const { |
| MachineBasicBlock *MBB = MI.getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| |
| const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = |
| AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); |
| |
| const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); |
| const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = |
| AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); |
| const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo = |
| AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode); |
| unsigned IntrOpcode = Intr->BaseOpcode; |
| const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI); |
| |
| const unsigned ArgOffset = MI.getNumExplicitDefs() + 1; |
| |
| Register VDataIn, VDataOut; |
| LLT VDataTy; |
| int NumVDataDwords = -1; |
| bool IsD16 = false; |
| |
| bool Unorm; |
| if (!BaseOpcode->Sampler) |
| Unorm = true; |
| else |
| Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0; |
| |
| bool TFE; |
| bool LWE; |
| bool IsTexFail = false; |
| if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(), |
| TFE, LWE, IsTexFail)) |
| return false; |
| |
| const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm(); |
| const bool IsA16 = (Flags & 1) != 0; |
| const bool IsG16 = (Flags & 2) != 0; |
| |
| // A16 implies 16-bit gradients if the subtarget doesn't support G16. |
| if (IsA16 && !STI.hasG16() && !IsG16) |
| return false; |
| |
| unsigned DMask = 0; |
| unsigned DMaskLanes = 0; |
| |
| if (BaseOpcode->Atomic) { |
| VDataOut = MI.getOperand(0).getReg(); |
| VDataIn = MI.getOperand(2).getReg(); |
| LLT Ty = MRI->getType(VDataIn); |
| |
| // Be careful to allow atomic swap on 16-bit element vectors. |
| const bool Is64Bit = BaseOpcode->AtomicX2 ? |
| Ty.getSizeInBits() == 128 : |
| Ty.getSizeInBits() == 64; |
| |
| if (BaseOpcode->AtomicX2) { |
| assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister); |
| |
| DMask = Is64Bit ? 0xf : 0x3; |
| NumVDataDwords = Is64Bit ? 4 : 2; |
| } else { |
| DMask = Is64Bit ? 0x3 : 0x1; |
| NumVDataDwords = Is64Bit ? 2 : 1; |
| } |
| } else { |
| DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm(); |
| DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); |
| |
| // One memoperand is mandatory, except for getresinfo. |
| // FIXME: Check this in verifier. |
| if (!MI.memoperands_empty()) { |
| const MachineMemOperand *MMO = *MI.memoperands_begin(); |
| |
| // Infer d16 from the memory size, as the register type will be mangled by |
| // unpacked subtargets, or by TFE. |
| IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32; |
| } |
| |
| if (BaseOpcode->Store) { |
| VDataIn = MI.getOperand(1).getReg(); |
| VDataTy = MRI->getType(VDataIn); |
| NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32; |
| } else { |
| VDataOut = MI.getOperand(0).getReg(); |
| VDataTy = MRI->getType(VDataOut); |
| NumVDataDwords = DMaskLanes; |
| |
| if (IsD16 && !STI.hasUnpackedD16VMem()) |
| NumVDataDwords = (DMaskLanes + 1) / 2; |
| } |
| } |
| |
| // Optimize _L to _LZ when the LOD argument is zero. |
| if (LZMappingInfo) { |
| // The legalizer replaced the register with an immediate 0 if we need to |
| // change the opcode. |
| const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex); |
| if (Lod.isImm()) { |
| assert(Lod.getImm() == 0); |
| IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l |
| } |
| } |
| |
| // Optimize _mip away when 'lod' is zero. |
| if (MIPMappingInfo) { |
| const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex); |
| if (Lod.isImm()) { |
| assert(Lod.getImm() == 0); |
| IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip |
| } |
| } |
| |
| // Set G16 opcode |
| if (IsG16 && !IsA16) { |
| const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = |
| AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); |
| assert(G16MappingInfo); |
| IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16 |
| } |
| |
| // TODO: Check this in verifier. |
| assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this"); |
| |
| unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(); |
| if (BaseOpcode->Atomic) |
| CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization |
| if (CPol & ~AMDGPU::CPol::ALL) |
| return false; |
| |
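| // Count the remaining address registers and the dwords they span; this
| // decides between the packed and NSA (non-sequential address) encodings.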
| int NumVAddrRegs = 0; |
| int NumVAddrDwords = 0; |
| for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) { |
| // Skip the $noregs and 0s inserted during legalization. |
| MachineOperand &AddrOp = MI.getOperand(ArgOffset + I); |
| if (!AddrOp.isReg()) |
| continue; // XXX - Break? |
| |
| Register Addr = AddrOp.getReg(); |
| if (!Addr) |
| break; |
| |
| ++NumVAddrRegs; |
| NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32; |
| } |
| |
| // The legalizer preprocessed the intrinsic arguments. If we aren't using
| // NSA, these should have been packed into a single value in the first
| // address register.
| const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs; |
| if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) { |
| LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n"); |
| return false; |
| } |
| |
| if (IsTexFail) |
| ++NumVDataDwords; |
| |
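| // Resolve the concrete MIMG opcode for the target encoding and the computed
| // vdata/vaddr dword counts.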
| int Opcode = -1; |
| if (IsGFX10Plus) { |
| Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, |
| UseNSA ? AMDGPU::MIMGEncGfx10NSA |
| : AMDGPU::MIMGEncGfx10Default, |
| NumVDataDwords, NumVAddrDwords); |
| } else { |
| if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) |
| Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, |
| NumVDataDwords, NumVAddrDwords); |
| if (Opcode == -1) |
| Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, |
| NumVDataDwords, NumVAddrDwords); |
| } |
| assert(Opcode != -1); |
| |
| auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode)) |
| .cloneMemRefs(MI); |
| |
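| // For AtomicX2 the instruction defines a register wide enough for both the
| // result and the compare value; copy only the low half back to the original
| // destination.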
| if (VDataOut) { |
| if (BaseOpcode->AtomicX2) { |
| const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64; |
| |
| Register TmpReg = MRI->createVirtualRegister( |
| Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); |
| unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; |
| |
| MIB.addDef(TmpReg); |
| if (!MRI->use_empty(VDataOut)) { |
| BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut) |
| .addReg(TmpReg, RegState::Kill, SubReg); |
| } |
| |
| } else { |
| MIB.addDef(VDataOut); // vdata output |
| } |
| } |
| |
| if (VDataIn) |
| MIB.addReg(VDataIn); // vdata input |
| |
| for (int I = 0; I != NumVAddrRegs; ++I) { |
| MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I); |
| if (SrcOp.isReg()) { |
| assert(SrcOp.getReg() != 0); |
| MIB.addReg(SrcOp.getReg()); |
| } |
| } |
| |
| MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg()); |
| if (BaseOpcode->Sampler) |
| MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg()); |
| |
| MIB.addImm(DMask); // dmask |
| |
| if (IsGFX10Plus) |
| MIB.addImm(DimInfo->Encoding); |
| MIB.addImm(Unorm); |
| |
| MIB.addImm(CPol); |
| MIB.addImm(IsA16 && // a16 or r128 |
| STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0); |
| if (IsGFX10Plus) |
| MIB.addImm(IsA16 ? -1 : 0); |
| |
| MIB.addImm(TFE); // tfe |
| MIB.addImm(LWE); // lwe |
| if (!IsGFX10Plus) |
| MIB.addImm(DimInfo->DA ? -1 : 0); |
| if (BaseOpcode->HasD16) |
| MIB.addImm(IsD16 ? -1 : 0); |
| |
| if (IsTexFail) { |
| // An image load instruction with TFE/LWE only conditionally writes to its
| // result registers. Initialize them to zero so that we always get
| // well-defined result values.
| assert(VDataOut && !VDataIn); |
| Register Tied = MRI->cloneVirtualRegister(VDataOut); |
| Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero) |
| .addImm(0); |
| auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4); |
| if (STI.usePRTStrictNull()) { |
| // With enable-prt-strict-null enabled, initialize all result registers to |
| // zero. |
| auto RegSeq = |
| BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); |
| for (auto Sub : Parts) |
| RegSeq.addReg(Zero).addImm(Sub); |
| } else { |
| // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE |
| // result register. |
| Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); |
| BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef); |
| auto RegSeq = |
| BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); |
| for (auto Sub : Parts.drop_back(1)) |
| RegSeq.addReg(Undef).addImm(Sub); |
| RegSeq.addReg(Zero).addImm(Parts.back()); |
| } |
| MIB.addReg(Tied, RegState::Implicit); |
| MIB->tieOperands(0, MIB->getNumOperands() - 1); |
| } |
| |
| MI.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( |
| MachineInstr &I) const { |
| unsigned IntrinsicID = I.getIntrinsicID(); |
| switch (IntrinsicID) { |
| case Intrinsic::amdgcn_end_cf: |
| return selectEndCfIntrinsic(I); |
| case Intrinsic::amdgcn_ds_ordered_add: |
| case Intrinsic::amdgcn_ds_ordered_swap: |
| return selectDSOrderedIntrinsic(I, IntrinsicID); |
| case Intrinsic::amdgcn_ds_gws_init: |
| case Intrinsic::amdgcn_ds_gws_barrier: |
| case Intrinsic::amdgcn_ds_gws_sema_v: |
| case Intrinsic::amdgcn_ds_gws_sema_br: |
| case Intrinsic::amdgcn_ds_gws_sema_p: |
| case Intrinsic::amdgcn_ds_gws_sema_release_all: |
| return selectDSGWSIntrinsic(I, IntrinsicID); |
| case Intrinsic::amdgcn_ds_append: |
| return selectDSAppendConsume(I, true); |
| case Intrinsic::amdgcn_ds_consume: |
| return selectDSAppendConsume(I, false); |
| case Intrinsic::amdgcn_s_barrier: |
| return selectSBarrier(I); |
| case Intrinsic::amdgcn_global_atomic_fadd: |
| return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3)); |
| default: { |
| return selectImpl(I, *CoverageInfo); |
| } |
| } |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const { |
| if (selectImpl(I, *CoverageInfo)) |
| return true; |
| |
| MachineBasicBlock *BB = I.getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| |
| Register DstReg = I.getOperand(0).getReg(); |
| unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); |
| assert(Size <= 32 || Size == 64); |
| const MachineOperand &CCOp = I.getOperand(1); |
| Register CCReg = CCOp.getReg(); |
| if (!isVCC(CCReg, *MRI)) { |
| unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 : |
| AMDGPU::S_CSELECT_B32; |
| MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) |
| .addReg(CCReg); |
| |
| // The generic constrainSelectedInstRegOperands doesn't work for the scc
| // register bank, because it does not cover the register class we use to
| // represent it. So we need to manually set the register class here.
| if (!MRI->getRegClassOrNull(CCReg)) |
| MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI)); |
| MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg) |
| .add(I.getOperand(2)) |
| .add(I.getOperand(3)); |
| |
| bool Ret = false; |
| Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); |
| Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI); |
| I.eraseFromParent(); |
| return Ret; |
| } |
| |
| // Wide VGPR select should have been split in RegBankSelect. |
| if (Size > 32) |
| return false; |
| |
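| // The condition is already in VCC: emit V_CNDMASK_B32 with the false value
| // in src0, the true value in src1, and the condition register last.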
| MachineInstr *Select = |
| BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg) |
| .addImm(0) |
| .add(I.getOperand(3)) |
| .addImm(0) |
| .add(I.getOperand(2)) |
| .add(I.getOperand(1)); |
| |
| bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); |
| I.eraseFromParent(); |
| return Ret; |
| } |
| |
| static int sizeToSubRegIndex(unsigned Size) { |
| switch (Size) { |
| case 32: |
| return AMDGPU::sub0; |
| case 64: |
| return AMDGPU::sub0_sub1; |
| case 96: |
| return AMDGPU::sub0_sub1_sub2; |
| case 128: |
| return AMDGPU::sub0_sub1_sub2_sub3; |
| case 256: |
| return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7; |
| default: |
| if (Size < 32) |
| return AMDGPU::sub0; |
| if (Size > 256) |
| return -1; |
| return sizeToSubRegIndex(PowerOf2Ceil(Size)); |
| } |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { |
| Register DstReg = I.getOperand(0).getReg(); |
| Register SrcReg = I.getOperand(1).getReg(); |
| const LLT DstTy = MRI->getType(DstReg); |
| const LLT SrcTy = MRI->getType(SrcReg); |
| const LLT S1 = LLT::scalar(1); |
| |
| const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); |
| const RegisterBank *DstRB; |
| if (DstTy == S1) { |
| // This is a special case. We don't treat s1 values from legalization
| // artifacts as vcc booleans.
| DstRB = SrcRB; |
| } else { |
| DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
| if (SrcRB != DstRB) |
| return false; |
| } |
| |
| const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; |
| |
| unsigned DstSize = DstTy.getSizeInBits(); |
| unsigned SrcSize = SrcTy.getSizeInBits(); |
| |
| const TargetRegisterClass *SrcRC |
| = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI); |
| const TargetRegisterClass *DstRC |
| = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI); |
| if (!SrcRC || !DstRC) |
| return false; |
| |
| if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || |
| !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { |
| LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n"); |
| return false; |
| } |
| |
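| // Truncating <2 x s32> to <2 x s16>: pack the low 16 bits of both source
| // elements into a single 32-bit register.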
| if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) { |
| MachineBasicBlock *MBB = I.getParent(); |
| const DebugLoc &DL = I.getDebugLoc(); |
| |
| Register LoReg = MRI->createVirtualRegister(DstRC); |
| Register HiReg = MRI->createVirtualRegister(DstRC); |
| BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg) |
| .addReg(SrcReg, 0, AMDGPU::sub0); |
| BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg) |
| .addReg(SrcReg, 0, AMDGPU::sub1); |
| |
| if (IsVALU && STI.hasSDWA()) { |
| // Write the low 16-bits of the high element into the high 16-bits of the |
| // low element. |
| MachineInstr *MovSDWA = |
| BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) |
| .addImm(0) // $src0_modifiers |
| .addReg(HiReg) // $src0 |
| .addImm(0) // $clamp |
| .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel |
| .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused |
| .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel |
| .addReg(LoReg, RegState::Implicit); |
| MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); |
| } else { |
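| // Without SDWA, shift the high element into the upper half, mask the low
| // element to 16 bits, and OR the halves together.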
| Register TmpReg0 = MRI->createVirtualRegister(DstRC); |
| Register TmpReg1 = MRI->createVirtualRegister(DstRC); |
| Register ImmReg = MRI->createVirtualRegister(DstRC); |
| if (IsVALU) { |
| BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0) |
| .addImm(16) |
| .addReg(HiReg); |
| } else { |
| BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0) |
| .addReg(HiReg) |
| .addImm(16); |
| } |
| |
| unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; |
| unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; |
| unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32; |
| |
| BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg) |
| .addImm(0xffff); |
| BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1) |
| .addReg(LoReg) |
| .addReg(ImmReg); |
| BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg) |
| .addReg(TmpReg0) |
| .addReg(TmpReg1); |
| } |
| |
| I.eraseFromParent(); |
| return true; |
| } |
| |
| if (!DstTy.isScalar()) |
| return false; |
| |
| if (SrcSize > 32) { |
| int SubRegIdx = sizeToSubRegIndex(DstSize); |
| if (SubRegIdx == -1) |
| return false; |
| |
| // Deal with weird cases where the class only partially supports the subreg |
| // index. |
| const TargetRegisterClass *SrcWithSubRC |
| = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx); |
| if (!SrcWithSubRC) |
| return false; |
| |
| if (SrcWithSubRC != SrcRC) { |
| if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) |
| return false; |
| } |
| |
| I.getOperand(1).setSubReg(SubRegIdx); |
| } |
| |
| I.setDesc(TII.get(TargetOpcode::COPY)); |
| return true; |
| } |
| |
| /// \returns true if a bitmask of \p Size trailing ones, written to \p Mask,
| /// will be an inline immediate.
| static bool shouldUseAndMask(unsigned Size, unsigned &Mask) { |
| Mask = maskTrailingOnes<unsigned>(Size); |
| int SignedMask = static_cast<int>(Mask); |
| return SignedMask >= -16 && SignedMask <= 64; |
| } |
| |
| // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1. |
| const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank( |
| Register Reg, const MachineRegisterInfo &MRI, |
| const TargetRegisterInfo &TRI) const { |
| const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); |
| if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) |
| return RB; |
| |
| // Ignore the type, since we don't use vcc in artifacts. |
| if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) |
| return &RBI.getRegBankFromRegClass(*RC, LLT()); |
| return nullptr; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { |
| bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG; |
| bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg; |
| const DebugLoc &DL = I.getDebugLoc(); |
| MachineBasicBlock &MBB = *I.getParent(); |
| const Register DstReg = I.getOperand(0).getReg(); |
| const Register SrcReg = I.getOperand(1).getReg(); |
| |
| const LLT DstTy = MRI->getType(DstReg); |
| const LLT SrcTy = MRI->getType(SrcReg); |
| const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ? |
| I.getOperand(2).getImm() : SrcTy.getSizeInBits(); |
| const unsigned DstSize = DstTy.getSizeInBits(); |
| if (!DstTy.isScalar()) |
| return false; |
| |
| // Artifact casts should never use vcc. |
| const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI); |
| |
| // FIXME: This should probably be illegal and split earlier. |
| if (I.getOpcode() == AMDGPU::G_ANYEXT) { |
| if (DstSize <= 32) |
| return selectCOPY(I); |
| |
| const TargetRegisterClass *SrcRC = |
| TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI); |
| const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); |
| const TargetRegisterClass *DstRC = |
| TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); |
| |
| Register UndefReg = MRI->createVirtualRegister(SrcRC); |
| BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); |
| BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
| .addReg(SrcReg) |
| .addImm(AMDGPU::sub0) |
| .addReg(UndefReg) |
| .addImm(AMDGPU::sub1); |
| I.eraseFromParent(); |
| |
| return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && |
| RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); |
| } |
| |
| if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) { |
| // 64-bit should have been split up in RegBankSelect |
| |
| // Try to use an and with a mask if it will save code size. |
| unsigned Mask; |
| if (!Signed && shouldUseAndMask(SrcSize, Mask)) { |
| MachineInstr *ExtI = |
| BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg) |
| .addImm(Mask) |
| .addReg(SrcReg); |
| I.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); |
| } |
| |
| const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; |
| MachineInstr *ExtI = |
| BuildMI(MBB, I, DL, TII.get(BFE), DstReg) |
| .addReg(SrcReg) |
| .addImm(0) // Offset |
| .addImm(SrcSize); // Width |
| I.eraseFromParent(); |
| return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); |
| } |
| |
| if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) { |
| const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ? |
| AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass; |
| if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) |
| return false; |
| |
| if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) { |
| const unsigned SextOpc = SrcSize == 8 ? |
| AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16; |
| BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg) |
| .addReg(SrcReg); |
| I.eraseFromParent(); |
| return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); |
| } |
| |
| const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64; |
| const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; |
| |
| // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
| if (DstSize > 32 && (SrcSize <= 32 || InReg)) { |
| // We need a 64-bit register source, but the high bits don't matter. |
| Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); |
| Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| unsigned SubReg = InReg ? AMDGPU::sub0 : 0; |
| |
| BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); |
| BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) |
| .addReg(SrcReg, 0, SubReg) |
| .addImm(AMDGPU::sub0) |
| .addReg(UndefReg) |
| .addImm(AMDGPU::sub1); |
| |
| BuildMI(MBB, I, DL, TII.get(BFE64), DstReg) |
| .addReg(ExtReg) |
| .addImm(SrcSize << 16); |
| |
| I.eraseFromParent(); |
| return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); |
| } |
| |
| unsigned Mask; |
| if (!Signed && shouldUseAndMask(SrcSize, Mask)) { |
| BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg) |
| .addReg(SrcReg) |
| .addImm(Mask); |
| } else { |
| BuildMI(MBB, I, DL, TII.get(BFE32), DstReg) |
| .addReg(SrcReg) |
| .addImm(SrcSize << 16); |
| } |
| |
| I.eraseFromParent(); |
| return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); |
| } |
| |
| return false; |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { |
| MachineBasicBlock *BB = I.getParent(); |
| MachineOperand &ImmOp = I.getOperand(1); |
| Register DstReg = I.getOperand(0).getReg(); |
| unsigned Size = MRI->getType(DstReg).getSizeInBits(); |
| |
| // The AMDGPU backend only supports Imm operands and not CImm or FPImm. |
| if (ImmOp.isFPImm()) { |
| const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt(); |
| ImmOp.ChangeToImmediate(Imm.getZExtValue()); |
| } else if (ImmOp.isCImm()) { |
| ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue()); |
| } else { |
| llvm_unreachable("Not supported by g_constants"); |
| } |
| |
| const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); |
| const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID; |
| |
| unsigned Opcode; |
| if (DstRB->getID() == AMDGPU::VCCRegBankID) { |
| Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; |
| } else { |
| Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; |
| |
| // We should never produce s1 values on banks other than VCC. If the user of
| // this constant already constrained the register, we may incorrectly think
| // it's VCC if it wasn't originally.
| if (Size == 1) |
| return false; |
| } |
| |
| if (Size != 64) { |
| I.setDesc(TII.get(Opcode)); |
| I.addImplicitDefUseOperands(*MF); |
| return constrainSelectedInstRegOperands(I, TII, TRI, RBI); |
| } |
| |
| const DebugLoc &DL = I.getDebugLoc(); |
| |
| APInt Imm(Size, I.getOperand(1).getImm()); |
| |
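| // 64-bit case: an SGPR inline constant fits in a single S_MOV_B64; otherwise
| // materialize the low and high halves separately and combine them with a
| // REG_SEQUENCE.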
| MachineInstr *ResInst; |
| if (IsSgpr && TII.isInlineConstant(Imm)) { |
| ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) |
| .addImm(I.getOperand(1).getImm()); |
| } else { |
| const TargetRegisterClass *RC = IsSgpr ? |
| &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; |
| Register LoReg = MRI->createVirtualRegister(RC); |
| Register HiReg = MRI->createVirtualRegister(RC); |
| |
| BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) |
| .addImm(Imm.trunc(32).getZExtValue()); |
| |
| BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg) |
| .addImm(Imm.ashr(32).getZExtValue()); |
| |
| ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) |
| .addReg(LoReg) |
| .addImm(AMDGPU::sub0) |
| .addReg(HiReg) |
| .addImm(AMDGPU::sub1); |
| } |
| |
| // We can't call constrainSelectedInstRegOperands here, because it doesn't
| // work for target-independent opcodes.
| I.eraseFromParent(); |
| const TargetRegisterClass *DstRC = |
| TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI); |
| if (!DstRC) |
| return true; |
| return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const { |
| // Only manually handle the f64 SGPR case. |
| // |
| // FIXME: This is a workaround for 2.5 different tablegen problems. Because |
| // the bit ops theoretically have a second result due to the implicit def of |
| // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing |
| // that is easy by disabling the check. The result works, but uses a |
| // nonsensical sreg32orlds_and_sreg_1 regclass. |
| // |
| // The DAG emitter is more problematic, and incorrectly adds both results of
| // S_XOR_B32 to the variadic REG_SEQUENCE operands.
| |
| Register Dst = MI.getOperand(0).getReg(); |
| const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); |
| if (DstRB->getID() != AMDGPU::SGPRRegBankID || |
| MRI->getType(Dst) != LLT::scalar(64)) |
| return false; |
| |
| Register Src = MI.getOperand(1).getReg(); |
| MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI); |
| if (Fabs) |
| Src = Fabs->getOperand(1).getReg(); |
| |
| if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || |
| !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) |
| return false; |
| |
| MachineBasicBlock *BB = MI.getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) |
| .addReg(Src, 0, AMDGPU::sub0); |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) |
| .addReg(Src, 0, AMDGPU::sub1); |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) |
| .addImm(0x80000000); |
| |
| // Set or toggle sign bit. |
| unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32; |
| BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg) |
| .addReg(HiReg) |
| .addReg(ConstReg); |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) |
| .addReg(LoReg) |
| .addImm(AMDGPU::sub0) |
| .addReg(OpReg) |
| .addImm(AMDGPU::sub1); |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| // FIXME: This is a workaround for the same tablegen problems as G_FNEG |
| bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const { |
| Register Dst = MI.getOperand(0).getReg(); |
| const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); |
| if (DstRB->getID() != AMDGPU::SGPRRegBankID || |
| MRI->getType(Dst) != LLT::scalar(64)) |
| return false; |
| |
| Register Src = MI.getOperand(1).getReg(); |
| MachineBasicBlock *BB = MI.getParent(); |
| const DebugLoc &DL = MI.getDebugLoc(); |
| Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); |
| |
| if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || |
| !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) |
| return false; |
| |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) |
| .addReg(Src, 0, AMDGPU::sub0); |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) |
| .addReg(Src, 0, AMDGPU::sub1); |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) |
| .addImm(0x7fffffff); |
| |
| // Clear sign bit. |
| // TODO: Should this use S_BITSET0_*?
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg) |
| .addReg(HiReg) |
| .addReg(ConstReg); |
| BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) |
| .addReg(LoReg) |
| .addImm(AMDGPU::sub0) |
| .addReg(OpReg) |
| .addImm(AMDGPU::sub1); |
| |
| MI.eraseFromParent(); |
| return true; |
| } |
| |
| static bool isConstant(const MachineInstr &MI) { |
| return MI.getOpcode() == TargetOpcode::G_CONSTANT; |
| } |
| |
| void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load, |
| const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const { |
| |
| const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg()); |
| |
| assert(PtrMI); |
| |
| if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD) |
| return; |
| |
| GEPInfo GEPInfo(*PtrMI); |
| |
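| // Operand 1 of the G_PTR_ADD is the base and operand 2 the offset. Fold a
| // constant offset into Imm and classify the remaining registers by bank.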
| for (unsigned i = 1; i != 3; ++i) { |
| const MachineOperand &GEPOp = PtrMI->getOperand(i); |
| const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg()); |
| assert(OpDef); |
| if (i == 2 && isConstant(*OpDef)) { |
| // TODO: Could handle constant base + variable offset, but a combine |
| // probably should have commuted it. |
| assert(GEPInfo.Imm == 0); |
| GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue(); |
| continue; |
| } |
| const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI); |
| if (OpBank->getID() == AMDGPU::SGPRRegBankID) |
| GEPInfo.SgprParts.push_back(GEPOp.getReg()); |
| else |
| GEPInfo.VgprParts.push_back(GEPOp.getReg()); |
| } |
| |
| AddrInfo.push_back(GEPInfo); |
| getAddrModeInfo(*PtrMI, MRI, AddrInfo); |
| } |
| |
| bool AMDGPUInstructionSelector::isSGPR(Register Reg) const { |
| return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID; |
| } |
| |
| bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const { |
| if (!MI.hasOneMemOperand()) |
| return false; |
| |
| const MachineMemOperand *MMO = *MI.memoperands_begin(); |
| const Value *Ptr = MMO->getValue(); |
| |
| // UndefValue means this is a load of a kernel input. These are uniform. |
| // Sometimes LDS instructions have constant pointers. |
| // If Ptr is null, then that means this mem operand contains a |
| // PseudoSourceValue like GOT. |
| if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || |
| isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) |
| return true; |
| |
| if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) |
| return true; |
| |
| const Instruction *I = dyn_cast<Instruction>(Ptr); |
| return I && I->getMetadata("amdgpu.uniform"); |
| } |
| |
| bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const { |
| for (const GEPInfo &GEPInfo : AddrInfo) { |
| if (!GEPInfo.VgprParts.empty()) |
| return true; |
| } |
| return false; |
| } |
| |
| void AMDGPUInstructionSelector::initM0(MachineInstr &I) const { |
| const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); |
| unsigned AS = PtrTy.getAddressSpace(); |
| if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) && |
| STI.ldsRequiresM0Init()) { |
| MachineBasicBlock *BB = I.getParent(); |
| |
| // If DS instructions require M0 initialization, insert it before selecting. |
| BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) |
| .addImm(-1); |
| } |
| } |
| |
| bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW( |
| MachineInstr &I) const { |
| if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) { |
| const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); |
| unsigned AS = PtrTy.getAddressSpace(); |
| if (AS == AMDGPUAS::GLOBAL_ADDRESS) |
| return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2)); |
| } |
| |
| initM0(I); |
| return selectImpl(I, *CoverageInfo); |
| } |
| |
| // TODO: No rtn optimization. |
| bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG( |
| MachineInstr &MI) const { |
| Register PtrReg = MI.getOperand(1).getReg(); |
| const LLT PtrTy = MRI->getType(PtrReg); |
| if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS || |
| STI.useFlatForGlobal()) |
| return selectImpl(MI, *CoverageInfo); |
| |
| Register DstReg = MI.getOperand(0).getReg(); |
| const LLT Ty = MRI->getType(DstReg); |
| const bool Is64 = Ty.getSizeInBits() == 64; |
| const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; |
| Register TmpReg = MRI->createVirtualRegister( |
| Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); |
| |
| const DebugLoc &DL = MI.getDebugLoc(); |
| MachineBasicBlock *BB = MI.getParent(); |
| |
| Register VAddr, RSrcReg, SOffset; |
| int64_t Offset = 0; |
| |
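| // Try the MUBUF addressing modes: the plain rsrc + offset form first, then
| // the addr64 form with a VGPR address.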
| unsigned Opcode; |
| if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) { |
| Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN : |
| AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN; |
| } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr, |
| RSrcReg, SOffset, Offset)) { |
| Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN : |
|