| //===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This file contains the AArch64 implementation of the TargetInstrInfo class. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "AArch64InstrInfo.h" |
| #include "AArch64MachineFunctionInfo.h" |
| #include "AArch64Subtarget.h" |
| #include "MCTargetDesc/AArch64AddressingModes.h" |
| #include "Utils/AArch64BaseInfo.h" |
| #include "llvm/ADT/ArrayRef.h" |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/SmallVector.h" |
| #include "llvm/CodeGen/MachineBasicBlock.h" |
| #include "llvm/CodeGen/MachineFrameInfo.h" |
| #include "llvm/CodeGen/MachineFunction.h" |
| #include "llvm/CodeGen/MachineInstr.h" |
| #include "llvm/CodeGen/MachineInstrBuilder.h" |
| #include "llvm/CodeGen/MachineMemOperand.h" |
| #include "llvm/CodeGen/MachineModuleInfo.h" |
| #include "llvm/CodeGen/MachineOperand.h" |
| #include "llvm/CodeGen/MachineRegisterInfo.h" |
| #include "llvm/CodeGen/StackMaps.h" |
| #include "llvm/CodeGen/TargetRegisterInfo.h" |
| #include "llvm/CodeGen/TargetSubtargetInfo.h" |
| #include "llvm/IR/DebugInfoMetadata.h" |
| #include "llvm/IR/DebugLoc.h" |
| #include "llvm/IR/GlobalValue.h" |
| #include "llvm/MC/MCAsmInfo.h" |
| #include "llvm/MC/MCInst.h" |
| #include "llvm/MC/MCInstBuilder.h" |
| #include "llvm/MC/MCInstrDesc.h" |
| #include "llvm/Support/Casting.h" |
| #include "llvm/Support/CodeGen.h" |
| #include "llvm/Support/CommandLine.h" |
| #include "llvm/Support/Compiler.h" |
| #include "llvm/Support/ErrorHandling.h" |
| #include "llvm/Support/MathExtras.h" |
| #include "llvm/Target/TargetMachine.h" |
| #include "llvm/Target/TargetOptions.h" |
| #include <cassert> |
| #include <cstdint> |
| #include <iterator> |
| #include <utility> |
| |
| using namespace llvm; |
| |
| #define GET_INSTRINFO_CTOR_DTOR |
| #include "AArch64GenInstrInfo.inc" |
| |
| static cl::opt<unsigned> TBZDisplacementBits( |
| "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14), |
| cl::desc("Restrict range of TB[N]Z instructions (DEBUG)")); |
| |
| static cl::opt<unsigned> CBZDisplacementBits( |
| "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19), |
| cl::desc("Restrict range of CB[N]Z instructions (DEBUG)")); |
| |
| static cl::opt<unsigned> |
| BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19), |
| cl::desc("Restrict range of Bcc instructions (DEBUG)")); |
| |
| AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI) |
| : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP, |
| AArch64::CATCHRET), |
| RI(STI.getTargetTriple()), Subtarget(STI) {} |
| |
/// Return the number of bytes of code the specified instruction may occupy.
/// This returns the maximum number of bytes.
| unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { |
| const MachineBasicBlock &MBB = *MI.getParent(); |
| const MachineFunction *MF = MBB.getParent(); |
| const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo(); |
| |
| { |
| auto Op = MI.getOpcode(); |
| if (Op == AArch64::INLINEASM || Op == AArch64::INLINEASM_BR) |
| return getInlineAsmLength(MI.getOperand(0).getSymbolName(), *MAI); |
| } |
| |
| // Meta-instructions emit no code. |
| if (MI.isMetaInstruction()) |
| return 0; |
| |
| // FIXME: We currently only handle pseudoinstructions that don't get expanded |
| // before the assembly printer. |
| unsigned NumBytes = 0; |
| const MCInstrDesc &Desc = MI.getDesc(); |
| switch (Desc.getOpcode()) { |
| default: |
| // Anything not explicitly designated otherwise is a normal 4-byte insn. |
| NumBytes = 4; |
| break; |
| case TargetOpcode::STACKMAP: |
| // The upper bound for a stackmap intrinsic is the full length of its shadow |
| NumBytes = StackMapOpers(&MI).getNumPatchBytes(); |
| assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!"); |
| break; |
| case TargetOpcode::PATCHPOINT: |
| // The size of the patchpoint intrinsic is the number of bytes requested |
| NumBytes = PatchPointOpers(&MI).getNumPatchBytes(); |
| assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!"); |
| break; |
| case TargetOpcode::STATEPOINT: |
| NumBytes = StatepointOpers(&MI).getNumPatchBytes(); |
| assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!"); |
| // No patch bytes means a normal call inst is emitted |
| if (NumBytes == 0) |
| NumBytes = 4; |
| break; |
| case AArch64::TLSDESC_CALLSEQ: |
| // This gets lowered to an instruction sequence which takes 16 bytes |
| NumBytes = 16; |
| break; |
| case AArch64::SpeculationBarrierISBDSBEndBB: |
| // This gets lowered to 2 4-byte instructions. |
| NumBytes = 8; |
| break; |
| case AArch64::SpeculationBarrierSBEndBB: |
    // This gets lowered to a single 4-byte instruction.
| NumBytes = 4; |
| break; |
| case AArch64::JumpTableDest32: |
| case AArch64::JumpTableDest16: |
| case AArch64::JumpTableDest8: |
| NumBytes = 12; |
| break; |
| case AArch64::SPACE: |
| NumBytes = MI.getOperand(1).getImm(); |
| break; |
| case AArch64::StoreSwiftAsyncContext: |
| NumBytes = 20; |
| break; |
| case TargetOpcode::BUNDLE: |
| NumBytes = getInstBundleLength(MI); |
| break; |
| } |
| |
| return NumBytes; |
| } |
| |
| unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const { |
| unsigned Size = 0; |
| MachineBasicBlock::const_instr_iterator I = MI.getIterator(); |
| MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); |
| while (++I != E && I->isInsideBundle()) { |
| assert(!I->isBundle() && "No nested bundle!"); |
| Size += getInstSizeInBytes(*I); |
| } |
| return Size; |
| } |
| |
| static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, |
| SmallVectorImpl<MachineOperand> &Cond) { |
| // Block ends with fall-through condbranch. |
| switch (LastInst->getOpcode()) { |
| default: |
| llvm_unreachable("Unknown branch instruction?"); |
| case AArch64::Bcc: |
| Target = LastInst->getOperand(1).getMBB(); |
| Cond.push_back(LastInst->getOperand(0)); |
| break; |
| case AArch64::CBZW: |
| case AArch64::CBZX: |
| case AArch64::CBNZW: |
| case AArch64::CBNZX: |
| Target = LastInst->getOperand(1).getMBB(); |
| Cond.push_back(MachineOperand::CreateImm(-1)); |
| Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode())); |
| Cond.push_back(LastInst->getOperand(0)); |
| break; |
| case AArch64::TBZW: |
| case AArch64::TBZX: |
| case AArch64::TBNZW: |
| case AArch64::TBNZX: |
| Target = LastInst->getOperand(2).getMBB(); |
| Cond.push_back(MachineOperand::CreateImm(-1)); |
| Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode())); |
| Cond.push_back(LastInst->getOperand(0)); |
| Cond.push_back(LastInst->getOperand(1)); |
| } |
| } |
| |
| static unsigned getBranchDisplacementBits(unsigned Opc) { |
| switch (Opc) { |
| default: |
| llvm_unreachable("unexpected opcode!"); |
| case AArch64::B: |
| return 64; |
| case AArch64::TBNZW: |
| case AArch64::TBZW: |
| case AArch64::TBNZX: |
| case AArch64::TBZX: |
| return TBZDisplacementBits; |
| case AArch64::CBNZW: |
| case AArch64::CBZW: |
| case AArch64::CBNZX: |
| case AArch64::CBZX: |
| return CBZDisplacementBits; |
| case AArch64::Bcc: |
| return BCCDisplacementBits; |
| } |
| } |
| |
| bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp, |
| int64_t BrOffset) const { |
| unsigned Bits = getBranchDisplacementBits(BranchOp); |
  assert(Bits >= 3 && "max branch displacement must be enough to jump "
                      "over conditional branch expansion");
| return isIntN(Bits, BrOffset / 4); |
| } |
| |
| MachineBasicBlock * |
| AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const { |
| switch (MI.getOpcode()) { |
| default: |
| llvm_unreachable("unexpected opcode!"); |
| case AArch64::B: |
| return MI.getOperand(0).getMBB(); |
| case AArch64::TBZW: |
| case AArch64::TBNZW: |
| case AArch64::TBZX: |
| case AArch64::TBNZX: |
| return MI.getOperand(2).getMBB(); |
| case AArch64::CBZW: |
| case AArch64::CBNZW: |
| case AArch64::CBZX: |
| case AArch64::CBNZX: |
| case AArch64::Bcc: |
| return MI.getOperand(1).getMBB(); |
| } |
| } |
| |
| // Branch analysis. |
| bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB, |
| MachineBasicBlock *&TBB, |
| MachineBasicBlock *&FBB, |
| SmallVectorImpl<MachineOperand> &Cond, |
| bool AllowModify) const { |
| // If the block has no terminators, it just falls into the block after it. |
| MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
| if (I == MBB.end()) |
| return false; |
| |
| // Skip over SpeculationBarrierEndBB terminators |
| if (I->getOpcode() == AArch64::SpeculationBarrierISBDSBEndBB || |
| I->getOpcode() == AArch64::SpeculationBarrierSBEndBB) { |
| --I; |
| } |
| |
| if (!isUnpredicatedTerminator(*I)) |
| return false; |
| |
| // Get the last instruction in the block. |
| MachineInstr *LastInst = &*I; |
| |
| // If there is only one terminator instruction, process it. |
| unsigned LastOpc = LastInst->getOpcode(); |
| if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) { |
| if (isUncondBranchOpcode(LastOpc)) { |
| TBB = LastInst->getOperand(0).getMBB(); |
| return false; |
| } |
| if (isCondBranchOpcode(LastOpc)) { |
| // Block ends with fall-through condbranch. |
| parseCondBranch(LastInst, TBB, Cond); |
| return false; |
| } |
| return true; // Can't handle indirect branch. |
| } |
| |
| // Get the instruction before it if it is a terminator. |
| MachineInstr *SecondLastInst = &*I; |
| unsigned SecondLastOpc = SecondLastInst->getOpcode(); |
| |
| // If AllowModify is true and the block ends with two or more unconditional |
| // branches, delete all but the first unconditional branch. |
| if (AllowModify && isUncondBranchOpcode(LastOpc)) { |
| while (isUncondBranchOpcode(SecondLastOpc)) { |
| LastInst->eraseFromParent(); |
| LastInst = SecondLastInst; |
| LastOpc = LastInst->getOpcode(); |
| if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) { |
        // Return now; the only terminator is an unconditional branch.
| TBB = LastInst->getOperand(0).getMBB(); |
| return false; |
| } else { |
| SecondLastInst = &*I; |
| SecondLastOpc = SecondLastInst->getOpcode(); |
| } |
| } |
| } |
| |
  // If we're allowed to modify and the block ends in an unconditional branch
  // which could simply fall through, remove the branch. (Note: This case only
| // matters when we can't understand the whole sequence, otherwise it's also |
| // handled by BranchFolding.cpp.) |
| if (AllowModify && isUncondBranchOpcode(LastOpc) && |
| MBB.isLayoutSuccessor(getBranchDestBlock(*LastInst))) { |
| LastInst->eraseFromParent(); |
| LastInst = SecondLastInst; |
| LastOpc = LastInst->getOpcode(); |
| if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) { |
| assert(!isUncondBranchOpcode(LastOpc) && |
| "unreachable unconditional branches removed above"); |
| |
| if (isCondBranchOpcode(LastOpc)) { |
| // Block ends with fall-through condbranch. |
| parseCondBranch(LastInst, TBB, Cond); |
| return false; |
| } |
| return true; // Can't handle indirect branch. |
| } else { |
| SecondLastInst = &*I; |
| SecondLastOpc = SecondLastInst->getOpcode(); |
| } |
| } |
| |
| // If there are three terminators, we don't know what sort of block this is. |
| if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I)) |
| return true; |
| |
| // If the block ends with a B and a Bcc, handle it. |
| if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { |
| parseCondBranch(SecondLastInst, TBB, Cond); |
| FBB = LastInst->getOperand(0).getMBB(); |
| return false; |
| } |
| |
| // If the block ends with two unconditional branches, handle it. The second |
| // one is not executed, so remove it. |
| if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { |
| TBB = SecondLastInst->getOperand(0).getMBB(); |
| I = LastInst; |
| if (AllowModify) |
| I->eraseFromParent(); |
| return false; |
| } |
| |
| // ...likewise if it ends with an indirect branch followed by an unconditional |
| // branch. |
| if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) { |
| I = LastInst; |
| if (AllowModify) |
| I->eraseFromParent(); |
| return true; |
| } |
| |
| // Otherwise, can't handle this. |
| return true; |
| } |
| |
| bool AArch64InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB, |
| MachineBranchPredicate &MBP, |
| bool AllowModify) const { |
  // For the moment, handle only a block which ends with a cb(n)z followed by
  // a fallthrough, since that is a common form.
| // TODO: Should we handle b.cc? |
| |
| MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
| if (I == MBB.end()) |
| return true; |
| |
| // Skip over SpeculationBarrierEndBB terminators |
| if (I->getOpcode() == AArch64::SpeculationBarrierISBDSBEndBB || |
| I->getOpcode() == AArch64::SpeculationBarrierSBEndBB) { |
| --I; |
| } |
| |
| if (!isUnpredicatedTerminator(*I)) |
| return true; |
| |
| // Get the last instruction in the block. |
| MachineInstr *LastInst = &*I; |
| unsigned LastOpc = LastInst->getOpcode(); |
| if (!isCondBranchOpcode(LastOpc)) |
| return true; |
| |
| switch (LastOpc) { |
| default: |
| return true; |
| case AArch64::CBZW: |
| case AArch64::CBZX: |
| case AArch64::CBNZW: |
| case AArch64::CBNZX: |
| break; |
| }; |
| |
| MBP.TrueDest = LastInst->getOperand(1).getMBB(); |
| assert(MBP.TrueDest && "expected!"); |
| MBP.FalseDest = MBB.getNextNode(); |
| |
| MBP.ConditionDef = nullptr; |
| MBP.SingleUseCondition = false; |
| |
| MBP.LHS = LastInst->getOperand(0); |
| MBP.RHS = MachineOperand::CreateImm(0); |
  MBP.Predicate = (LastOpc == AArch64::CBNZW || LastOpc == AArch64::CBNZX)
                      ? MachineBranchPredicate::PRED_NE
                      : MachineBranchPredicate::PRED_EQ;
| return false; |
| } |
| |
| bool AArch64InstrInfo::reverseBranchCondition( |
| SmallVectorImpl<MachineOperand> &Cond) const { |
| if (Cond[0].getImm() != -1) { |
| // Regular Bcc |
| AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm(); |
| Cond[0].setImm(AArch64CC::getInvertedCondCode(CC)); |
| } else { |
| // Folded compare-and-branch |
| switch (Cond[1].getImm()) { |
| default: |
| llvm_unreachable("Unknown conditional branch!"); |
| case AArch64::CBZW: |
| Cond[1].setImm(AArch64::CBNZW); |
| break; |
| case AArch64::CBNZW: |
| Cond[1].setImm(AArch64::CBZW); |
| break; |
| case AArch64::CBZX: |
| Cond[1].setImm(AArch64::CBNZX); |
| break; |
| case AArch64::CBNZX: |
| Cond[1].setImm(AArch64::CBZX); |
| break; |
| case AArch64::TBZW: |
| Cond[1].setImm(AArch64::TBNZW); |
| break; |
| case AArch64::TBNZW: |
| Cond[1].setImm(AArch64::TBZW); |
| break; |
| case AArch64::TBZX: |
| Cond[1].setImm(AArch64::TBNZX); |
| break; |
| case AArch64::TBNZX: |
| Cond[1].setImm(AArch64::TBZX); |
| break; |
| } |
| } |
| |
| return false; |
| } |
| |
| unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB, |
| int *BytesRemoved) const { |
| MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); |
| if (I == MBB.end()) |
| return 0; |
| |
| if (!isUncondBranchOpcode(I->getOpcode()) && |
| !isCondBranchOpcode(I->getOpcode())) |
| return 0; |
| |
| // Remove the branch. |
| I->eraseFromParent(); |
| |
| I = MBB.end(); |
| |
| if (I == MBB.begin()) { |
| if (BytesRemoved) |
| *BytesRemoved = 4; |
| return 1; |
| } |
| --I; |
| if (!isCondBranchOpcode(I->getOpcode())) { |
| if (BytesRemoved) |
| *BytesRemoved = 4; |
| return 1; |
| } |
| |
| // Remove the branch. |
| I->eraseFromParent(); |
| if (BytesRemoved) |
| *BytesRemoved = 8; |
| |
| return 2; |
| } |
| |
| void AArch64InstrInfo::instantiateCondBranch( |
| MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB, |
| ArrayRef<MachineOperand> Cond) const { |
| if (Cond[0].getImm() != -1) { |
| // Regular Bcc |
| BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB); |
| } else { |
| // Folded compare-and-branch |
    // Note that we use MachineInstrBuilder::add() instead of addReg() to keep
    // the register operand's flags.
| const MachineInstrBuilder MIB = |
| BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]); |
| if (Cond.size() > 3) |
| MIB.addImm(Cond[3].getImm()); |
| MIB.addMBB(TBB); |
| } |
| } |
| |
| unsigned AArch64InstrInfo::insertBranch( |
| MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, |
| ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const { |
| // Shouldn't be a fall through. |
| assert(TBB && "insertBranch must not be told to insert a fallthrough"); |
| |
| if (!FBB) { |
| if (Cond.empty()) // Unconditional branch? |
| BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB); |
| else |
| instantiateCondBranch(MBB, DL, TBB, Cond); |
| |
| if (BytesAdded) |
| *BytesAdded = 4; |
| |
| return 1; |
| } |
| |
| // Two-way conditional branch. |
| instantiateCondBranch(MBB, DL, TBB, Cond); |
| BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB); |
| |
| if (BytesAdded) |
| *BytesAdded = 8; |
| |
| return 2; |
| } |
| |
| // Find the original register that VReg is copied from. |
| static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) { |
| while (Register::isVirtualRegister(VReg)) { |
| const MachineInstr *DefMI = MRI.getVRegDef(VReg); |
| if (!DefMI->isFullCopy()) |
| return VReg; |
| VReg = DefMI->getOperand(1).getReg(); |
| } |
| return VReg; |
| } |
| |
| // Determine if VReg is defined by an instruction that can be folded into a |
| // csel instruction. If so, return the folded opcode, and the replacement |
| // register. |
| static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg, |
| unsigned *NewVReg = nullptr) { |
| VReg = removeCopies(MRI, VReg); |
| if (!Register::isVirtualRegister(VReg)) |
| return 0; |
| |
| bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg)); |
| const MachineInstr *DefMI = MRI.getVRegDef(VReg); |
| unsigned Opc = 0; |
| unsigned SrcOpNum = 0; |
| switch (DefMI->getOpcode()) { |
| case AArch64::ADDSXri: |
| case AArch64::ADDSWri: |
| // if NZCV is used, do not fold. |
| if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1) |
| return 0; |
| // fall-through to ADDXri and ADDWri. |
| LLVM_FALLTHROUGH; |
| case AArch64::ADDXri: |
| case AArch64::ADDWri: |
| // add x, 1 -> csinc. |
| if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 || |
| DefMI->getOperand(3).getImm() != 0) |
| return 0; |
| SrcOpNum = 1; |
| Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr; |
| break; |
| |
| case AArch64::ORNXrr: |
| case AArch64::ORNWrr: { |
| // not x -> csinv, represented as orn dst, xzr, src. |
| unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg()); |
| if (ZReg != AArch64::XZR && ZReg != AArch64::WZR) |
| return 0; |
| SrcOpNum = 2; |
| Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr; |
| break; |
| } |
| |
| case AArch64::SUBSXrr: |
| case AArch64::SUBSWrr: |
| // if NZCV is used, do not fold. |
| if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1) |
| return 0; |
| // fall-through to SUBXrr and SUBWrr. |
| LLVM_FALLTHROUGH; |
| case AArch64::SUBXrr: |
| case AArch64::SUBWrr: { |
| // neg x -> csneg, represented as sub dst, xzr, src. |
| unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg()); |
| if (ZReg != AArch64::XZR && ZReg != AArch64::WZR) |
| return 0; |
| SrcOpNum = 2; |
| Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr; |
| break; |
| } |
| default: |
| return 0; |
| } |
| assert(Opc && SrcOpNum && "Missing parameters"); |
| |
| if (NewVReg) |
| *NewVReg = DefMI->getOperand(SrcOpNum).getReg(); |
| return Opc; |
| } |
| |
| bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB, |
| ArrayRef<MachineOperand> Cond, |
| Register DstReg, Register TrueReg, |
| Register FalseReg, int &CondCycles, |
| int &TrueCycles, |
| int &FalseCycles) const { |
| // Check register classes. |
| const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
| const TargetRegisterClass *RC = |
| RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg)); |
| if (!RC) |
| return false; |
| |
| // Also need to check the dest regclass, in case we're trying to optimize |
| // something like: |
| // %1(gpr) = PHI %2(fpr), bb1, %(fpr), bb2 |
| if (!RI.getCommonSubClass(RC, MRI.getRegClass(DstReg))) |
| return false; |
| |
| // Expanding cbz/tbz requires an extra cycle of latency on the condition. |
| unsigned ExtraCondLat = Cond.size() != 1; |
| |
| // GPRs are handled by csel. |
| // FIXME: Fold in x+1, -x, and ~x when applicable. |
| if (AArch64::GPR64allRegClass.hasSubClassEq(RC) || |
| AArch64::GPR32allRegClass.hasSubClassEq(RC)) { |
| // Single-cycle csel, csinc, csinv, and csneg. |
| CondCycles = 1 + ExtraCondLat; |
| TrueCycles = FalseCycles = 1; |
| if (canFoldIntoCSel(MRI, TrueReg)) |
| TrueCycles = 0; |
| else if (canFoldIntoCSel(MRI, FalseReg)) |
| FalseCycles = 0; |
| return true; |
| } |
| |
| // Scalar floating point is handled by fcsel. |
| // FIXME: Form fabs, fmin, and fmax when applicable. |
| if (AArch64::FPR64RegClass.hasSubClassEq(RC) || |
| AArch64::FPR32RegClass.hasSubClassEq(RC)) { |
| CondCycles = 5 + ExtraCondLat; |
| TrueCycles = FalseCycles = 2; |
| return true; |
| } |
| |
| // Can't do vectors. |
| return false; |
| } |
| |
| void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB, |
| MachineBasicBlock::iterator I, |
| const DebugLoc &DL, Register DstReg, |
| ArrayRef<MachineOperand> Cond, |
| Register TrueReg, Register FalseReg) const { |
| MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); |
| |
| // Parse the condition code, see parseCondBranch() above. |
| AArch64CC::CondCode CC; |
| switch (Cond.size()) { |
| default: |
| llvm_unreachable("Unknown condition opcode in Cond"); |
| case 1: // b.cc |
| CC = AArch64CC::CondCode(Cond[0].getImm()); |
| break; |
| case 3: { // cbz/cbnz |
| // We must insert a compare against 0. |
| bool Is64Bit; |
| switch (Cond[1].getImm()) { |
| default: |
| llvm_unreachable("Unknown branch opcode in Cond"); |
| case AArch64::CBZW: |
| Is64Bit = false; |
| CC = AArch64CC::EQ; |
| break; |
| case AArch64::CBZX: |
| Is64Bit = true; |
| CC = AArch64CC::EQ; |
| break; |
| case AArch64::CBNZW: |
| Is64Bit = false; |
| CC = AArch64CC::NE; |
| break; |
| case AArch64::CBNZX: |
| Is64Bit = true; |
| CC = AArch64CC::NE; |
| break; |
| } |
| Register SrcReg = Cond[2].getReg(); |
| if (Is64Bit) { |
| // cmp reg, #0 is actually subs xzr, reg, #0. |
| MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass); |
| BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR) |
| .addReg(SrcReg) |
| .addImm(0) |
| .addImm(0); |
| } else { |
| MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass); |
| BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR) |
| .addReg(SrcReg) |
| .addImm(0) |
| .addImm(0); |
| } |
| break; |
| } |
| case 4: { // tbz/tbnz |
| // We must insert a tst instruction. |
| switch (Cond[1].getImm()) { |
| default: |
| llvm_unreachable("Unknown branch opcode in Cond"); |
| case AArch64::TBZW: |
| case AArch64::TBZX: |
| CC = AArch64CC::EQ; |
| break; |
| case AArch64::TBNZW: |
| case AArch64::TBNZX: |
| CC = AArch64CC::NE; |
| break; |
| } |
| // cmp reg, #foo is actually ands xzr, reg, #1<<foo. |
| if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW) |
| BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR) |
| .addReg(Cond[2].getReg()) |
| .addImm( |
| AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32)); |
| else |
| BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR) |
| .addReg(Cond[2].getReg()) |
| .addImm( |
| AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64)); |
| break; |
| } |
| } |
| |
| unsigned Opc = 0; |
| const TargetRegisterClass *RC = nullptr; |
| bool TryFold = false; |
| if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) { |
| RC = &AArch64::GPR64RegClass; |
| Opc = AArch64::CSELXr; |
| TryFold = true; |
| } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) { |
| RC = &AArch64::GPR32RegClass; |
| Opc = AArch64::CSELWr; |
| TryFold = true; |
| } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) { |
| RC = &AArch64::FPR64RegClass; |
| Opc = AArch64::FCSELDrrr; |
| } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) { |
| RC = &AArch64::FPR32RegClass; |
| Opc = AArch64::FCSELSrrr; |
| } |
| assert(RC && "Unsupported regclass"); |
| |
| // Try folding simple instructions into the csel. |
| if (TryFold) { |
| unsigned NewVReg = 0; |
| unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg); |
| if (FoldedOpc) { |
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
| CC = AArch64CC::getInvertedCondCode(CC); |
| TrueReg = FalseReg; |
| } else |
| FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg); |
| |
| // Fold the operation. Leave any dead instructions for DCE to clean up. |
| if (FoldedOpc) { |
| FalseReg = NewVReg; |
| Opc = FoldedOpc; |
      // This extends the live range of NewVReg.
| MRI.clearKillFlags(NewVReg); |
| } |
| } |
| |
  // Pull all virtual registers into the appropriate class.
| MRI.constrainRegClass(TrueReg, RC); |
| MRI.constrainRegClass(FalseReg, RC); |
| |
| // Insert the csel. |
| BuildMI(MBB, I, DL, get(Opc), DstReg) |
| .addReg(TrueReg) |
| .addReg(FalseReg) |
| .addImm(CC); |
| } |
| |
| /// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx. |
| static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize) { |
| uint64_t Imm = MI.getOperand(1).getImm(); |
| uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize); |
| uint64_t Encoding; |
| return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding); |
| } |
| |
| // FIXME: this implementation should be micro-architecture dependent, so a |
| // micro-architecture target hook should be introduced here in future. |
| bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { |
| if (!Subtarget.hasCustomCheapAsMoveHandling()) |
| return MI.isAsCheapAsAMove(); |
| |
| const unsigned Opcode = MI.getOpcode(); |
| |
| // Firstly, check cases gated by features. |
| |
| if (Subtarget.hasZeroCycleZeroingFP()) { |
| if (Opcode == AArch64::FMOVH0 || |
| Opcode == AArch64::FMOVS0 || |
| Opcode == AArch64::FMOVD0) |
| return true; |
| } |
| |
| if (Subtarget.hasZeroCycleZeroingGP()) { |
| if (Opcode == TargetOpcode::COPY && |
| (MI.getOperand(1).getReg() == AArch64::WZR || |
| MI.getOperand(1).getReg() == AArch64::XZR)) |
| return true; |
| } |
| |
| // Secondly, check cases specific to sub-targets. |
| |
| if (Subtarget.hasExynosCheapAsMoveHandling()) { |
| if (isExynosCheapAsMove(MI)) |
| return true; |
| |
| return MI.isAsCheapAsAMove(); |
| } |
| |
| // Finally, check generic cases. |
| |
| switch (Opcode) { |
| default: |
| return false; |
| |
| // add/sub on register without shift |
| case AArch64::ADDWri: |
| case AArch64::ADDXri: |
| case AArch64::SUBWri: |
| case AArch64::SUBXri: |
| return (MI.getOperand(3).getImm() == 0); |
| |
| // logical ops on immediate |
| case AArch64::ANDWri: |
| case AArch64::ANDXri: |
| case AArch64::EORWri: |
| case AArch64::EORXri: |
| case AArch64::ORRWri: |
| case AArch64::ORRXri: |
| return true; |
| |
| // logical ops on register without shift |
| case AArch64::ANDWrr: |
| case AArch64::ANDXrr: |
| case AArch64::BICWrr: |
| case AArch64::BICXrr: |
| case AArch64::EONWrr: |
| case AArch64::EONXrr: |
| case AArch64::EORWrr: |
| case AArch64::EORXrr: |
| case AArch64::ORNWrr: |
| case AArch64::ORNXrr: |
| case AArch64::ORRWrr: |
| case AArch64::ORRXrr: |
| return true; |
| |
| // If MOVi32imm or MOVi64imm can be expanded into ORRWri or |
| // ORRXri, it is as cheap as MOV |
| case AArch64::MOVi32imm: |
| return canBeExpandedToORR(MI, 32); |
| case AArch64::MOVi64imm: |
| return canBeExpandedToORR(MI, 64); |
| } |
| |
| llvm_unreachable("Unknown opcode to check as cheap as a move!"); |
| } |
| |
| bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) { |
| switch (MI.getOpcode()) { |
| default: |
| return false; |
| |
| case AArch64::ADDWrs: |
| case AArch64::ADDXrs: |
| case AArch64::ADDSWrs: |
| case AArch64::ADDSXrs: { |
| unsigned Imm = MI.getOperand(3).getImm(); |
| unsigned ShiftVal = AArch64_AM::getShiftValue(Imm); |
| if (ShiftVal == 0) |
| return true; |
| return AArch64_AM::getShiftType(Imm) == AArch64_AM::LSL && ShiftVal <= 5; |
| } |
| |
| case AArch64::ADDWrx: |
| case AArch64::ADDXrx: |
| case AArch64::ADDXrx64: |
| case AArch64::ADDSWrx: |
| case AArch64::ADDSXrx: |
| case AArch64::ADDSXrx64: { |
| unsigned Imm = MI.getOperand(3).getImm(); |
| switch (AArch64_AM::getArithExtendType(Imm)) { |
| default: |
| return false; |
| case AArch64_AM::UXTB: |
| case AArch64_AM::UXTH: |
| case AArch64_AM::UXTW: |
| case AArch64_AM::UXTX: |
| return AArch64_AM::getArithShiftValue(Imm) <= 4; |
| } |
| } |
| |
| case AArch64::SUBWrs: |
| case AArch64::SUBSWrs: { |
| unsigned Imm = MI.getOperand(3).getImm(); |
| unsigned ShiftVal = AArch64_AM::getShiftValue(Imm); |
| return ShiftVal == 0 || |
| (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 31); |
| } |
| |
| case AArch64::SUBXrs: |
| case AArch64::SUBSXrs: { |
| unsigned Imm = MI.getOperand(3).getImm(); |
| unsigned ShiftVal = AArch64_AM::getShiftValue(Imm); |
| return ShiftVal == 0 || |
| (AArch64_AM::getShiftType(Imm) == AArch64_AM::ASR && ShiftVal == 63); |
| } |
| |
| case AArch64::SUBWrx: |
| case AArch64::SUBXrx: |
| case AArch64::SUBXrx64: |
| case AArch64::SUBSWrx: |
| case AArch64::SUBSXrx: |
| case AArch64::SUBSXrx64: { |
| unsigned Imm = MI.getOperand(3).getImm(); |
| switch (AArch64_AM::getArithExtendType(Imm)) { |
| default: |
| return false; |
| case AArch64_AM::UXTB: |
| case AArch64_AM::UXTH: |
| case AArch64_AM::UXTW: |
| case AArch64_AM::UXTX: |
| return AArch64_AM::getArithShiftValue(Imm) == 0; |
| } |
| } |
| |
| case AArch64::LDRBBroW: |
| case AArch64::LDRBBroX: |
| case AArch64::LDRBroW: |
| case AArch64::LDRBroX: |
| case AArch64::LDRDroW: |
| case AArch64::LDRDroX: |
| case AArch64::LDRHHroW: |
| case AArch64::LDRHHroX: |
| case AArch64::LDRHroW: |
| case AArch64::LDRHroX: |
| case AArch64::LDRQroW: |
| case AArch64::LDRQroX: |
| case AArch64::LDRSBWroW: |
| case AArch64::LDRSBWroX: |
| case AArch64::LDRSBXroW: |
| case AArch64::LDRSBXroX: |
| case AArch64::LDRSHWroW: |
| case AArch64::LDRSHWroX: |
| case AArch64::LDRSHXroW: |
| case AArch64::LDRSHXroX: |
| case AArch64::LDRSWroW: |
| case AArch64::LDRSWroX: |
| case AArch64::LDRSroW: |
| case AArch64::LDRSroX: |
| case AArch64::LDRWroW: |
| case AArch64::LDRWroX: |
| case AArch64::LDRXroW: |
| case AArch64::LDRXroX: |
| case AArch64::PRFMroW: |
| case AArch64::PRFMroX: |
| case AArch64::STRBBroW: |
| case AArch64::STRBBroX: |
| case AArch64::STRBroW: |
| case AArch64::STRBroX: |
| case AArch64::STRDroW: |
| case AArch64::STRDroX: |
| case AArch64::STRHHroW: |
| case AArch64::STRHHroX: |
| case AArch64::STRHroW: |
| case AArch64::STRHroX: |
| case AArch64::STRQroW: |
| case AArch64::STRQroX: |
| case AArch64::STRSroW: |
| case AArch64::STRSroX: |
| case AArch64::STRWroW: |
| case AArch64::STRWroX: |
| case AArch64::STRXroW: |
| case AArch64::STRXroX: { |
| unsigned IsSigned = MI.getOperand(3).getImm(); |
| return !IsSigned; |
| } |
| } |
| } |
| |
| bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) { |
| unsigned Opc = MI.getOpcode(); |
| switch (Opc) { |
| default: |
| return false; |
| case AArch64::SEH_StackAlloc: |
| case AArch64::SEH_SaveFPLR: |
| case AArch64::SEH_SaveFPLR_X: |
| case AArch64::SEH_SaveReg: |
| case AArch64::SEH_SaveReg_X: |
| case AArch64::SEH_SaveRegP: |
| case AArch64::SEH_SaveRegP_X: |
| case AArch64::SEH_SaveFReg: |
| case AArch64::SEH_SaveFReg_X: |
| case AArch64::SEH_SaveFRegP: |
| case AArch64::SEH_SaveFRegP_X: |
| case AArch64::SEH_SetFP: |
| case AArch64::SEH_AddFP: |
| case AArch64::SEH_Nop: |
| case AArch64::SEH_PrologEnd: |
| case AArch64::SEH_EpilogStart: |
| case AArch64::SEH_EpilogEnd: |
| return true; |
| } |
| } |
| |
| bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, |
| Register &SrcReg, Register &DstReg, |
| unsigned &SubIdx) const { |
| switch (MI.getOpcode()) { |
| default: |
| return false; |
| case AArch64::SBFMXri: // aka sxtw |
| case AArch64::UBFMXri: // aka uxtw |
    // Check for the 32 -> 64 bit extension case; these instructions can do
    // much more.
| if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31) |
| return false; |
| // This is a signed or unsigned 32 -> 64 bit extension. |
| SrcReg = MI.getOperand(1).getReg(); |
| DstReg = MI.getOperand(0).getReg(); |
| SubIdx = AArch64::sub_32; |
| return true; |
| } |
| } |
| |
| bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint( |
| const MachineInstr &MIa, const MachineInstr &MIb) const { |
| const TargetRegisterInfo *TRI = &getRegisterInfo(); |
| const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr; |
| int64_t OffsetA = 0, OffsetB = 0; |
| unsigned WidthA = 0, WidthB = 0; |
| bool OffsetAIsScalable = false, OffsetBIsScalable = false; |
| |
| assert(MIa.mayLoadOrStore() && "MIa must be a load or store."); |
| assert(MIb.mayLoadOrStore() && "MIb must be a load or store."); |
| |
| if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() || |
| MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) |
| return false; |
| |
  // Retrieve the base, the offset from the base, and the width. Width is the
  // size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If the
  // bases are identical, and the offset of the lower memory access plus its
  // width does not overlap the offset of the higher memory access, then the
  // memory accesses are disjoint.
| // If OffsetAIsScalable and OffsetBIsScalable are both true, they |
| // are assumed to have the same scale (vscale). |
| if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, OffsetAIsScalable, |
| WidthA, TRI) && |
| getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, OffsetBIsScalable, |
| WidthB, TRI)) { |
| if (BaseOpA->isIdenticalTo(*BaseOpB) && |
| OffsetAIsScalable == OffsetBIsScalable) { |
| int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; |
| int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; |
| int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; |
| if (LowOffset + LowWidth <= HighOffset) |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI, |
| const MachineBasicBlock *MBB, |
| const MachineFunction &MF) const { |
| if (TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF)) |
| return true; |
| switch (MI.getOpcode()) { |
| case AArch64::HINT: |
| // CSDB hints are scheduling barriers. |
| if (MI.getOperand(0).getImm() == 0x14) |
| return true; |
| break; |
| case AArch64::DSB: |
| case AArch64::ISB: |
| // DSB and ISB also are scheduling barriers. |
| return true; |
| default:; |
| } |
| return isSEHInstruction(MI); |
| } |
| |
| /// analyzeCompare - For a comparison instruction, return the source registers |
| /// in SrcReg and SrcReg2, and the value it compares against in CmpValue. |
| /// Return true if the comparison instruction can be analyzed. |
| bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, |
| Register &SrcReg2, int64_t &CmpMask, |
| int64_t &CmpValue) const { |
| // The first operand can be a frame index where we'd normally expect a |
| // register. |
| assert(MI.getNumOperands() >= 2 && "All AArch64 cmps should have 2 operands"); |
| if (!MI.getOperand(1).isReg()) |
| return false; |
| |
| switch (MI.getOpcode()) { |
| default: |
| break; |
| case AArch64::PTEST_PP: |
| SrcReg = MI.getOperand(0).getReg(); |
| SrcReg2 = MI.getOperand(1).getReg(); |
| // Not sure about the mask and value for now... |
| CmpMask = ~0; |
| CmpValue = 0; |
| return true; |
| case AArch64::SUBSWrr: |
| case AArch64::SUBSWrs: |
| case AArch64::SUBSWrx: |
| case AArch64::SUBSXrr: |
| case AArch64::SUBSXrs: |
| case AArch64::SUBSXrx: |
| case AArch64::ADDSWrr: |
| case AArch64::ADDSWrs: |
| case AArch64::ADDSWrx: |
| case AArch64::ADDSXrr: |
| case AArch64::ADDSXrs: |
| case AArch64::ADDSXrx: |
| // Replace SUBSWrr with SUBWrr if NZCV is not used. |
| SrcReg = MI.getOperand(1).getReg(); |
| SrcReg2 = MI.getOperand(2).getReg(); |
| CmpMask = ~0; |
| CmpValue = 0; |
| return true; |
| case AArch64::SUBSWri: |
| case AArch64::ADDSWri: |
| case AArch64::SUBSXri: |
| case AArch64::ADDSXri: |
| SrcReg = MI.getOperand(1).getReg(); |
| SrcReg2 = 0; |
| CmpMask = ~0; |
| CmpValue = MI.getOperand(2).getImm(); |
| return true; |
| case AArch64::ANDSWri: |
| case AArch64::ANDSXri: |
    // ANDS does not use the same immediate encoding scheme as the other xxxS
    // instructions.
| SrcReg = MI.getOperand(1).getReg(); |
| SrcReg2 = 0; |
| CmpMask = ~0; |
| CmpValue = AArch64_AM::decodeLogicalImmediate( |
| MI.getOperand(2).getImm(), |
| MI.getOpcode() == AArch64::ANDSWri ? 32 : 64); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| static bool UpdateOperandRegClass(MachineInstr &Instr) { |
| MachineBasicBlock *MBB = Instr.getParent(); |
| assert(MBB && "Can't get MachineBasicBlock here"); |
| MachineFunction *MF = MBB->getParent(); |
| assert(MF && "Can't get MachineFunction here"); |
| const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); |
| const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); |
| MachineRegisterInfo *MRI = &MF->getRegInfo(); |
| |
| for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx; |
| ++OpIdx) { |
| MachineOperand &MO = Instr.getOperand(OpIdx); |
| const TargetRegisterClass *OpRegCstraints = |
| Instr.getRegClassConstraint(OpIdx, TII, TRI); |
| |
| // If there's no constraint, there's nothing to do. |
| if (!OpRegCstraints) |
| continue; |
| // If the operand is a frame index, there's nothing to do here. |
| // A frame index operand will resolve correctly during PEI. |
| if (MO.isFI()) |
| continue; |
| |
| assert(MO.isReg() && |
| "Operand has register constraints without being a register!"); |
| |
| Register Reg = MO.getReg(); |
| if (Register::isPhysicalRegister(Reg)) { |
| if (!OpRegCstraints->contains(Reg)) |
| return false; |
| } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) && |
| !MRI->constrainRegClass(Reg, OpRegCstraints)) |
| return false; |
| } |
| |
| return true; |
| } |
| |
/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
| static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) { |
| // Don't convert all compare instructions, because for some the zero register |
| // encoding becomes the sp register. |
| bool MIDefinesZeroReg = false; |
| if (MI.definesRegister(AArch64::WZR) || MI.definesRegister(AArch64::XZR)) |
| MIDefinesZeroReg = true; |
| |
| switch (MI.getOpcode()) { |
| default: |
| return MI.getOpcode(); |
| case AArch64::ADDSWrr: |
| return AArch64::ADDWrr; |
| case AArch64::ADDSWri: |
| return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri; |
| case AArch64::ADDSWrs: |
| return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs; |
| case AArch64::ADDSWrx: |
| return AArch64::ADDWrx; |
| case AArch64::ADDSXrr: |
| return AArch64::ADDXrr; |
| case AArch64::ADDSXri: |
| return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri; |
| case AArch64::ADDSXrs: |
| return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs; |
| case AArch64::ADDSXrx: |
| return AArch64::ADDXrx; |
| case AArch64::SUBSWrr: |
| return AArch64::SUBWrr; |
| case AArch64::SUBSWri: |
| return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri; |
| case AArch64::SUBSWrs: |
| return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs; |
| case AArch64::SUBSWrx: |
| return AArch64::SUBWrx; |
| case AArch64::SUBSXrr: |
| return AArch64::SUBXrr; |
| case AArch64::SUBSXri: |
| return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri; |
| case AArch64::SUBSXrs: |
| return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs; |
| case AArch64::SUBSXrx: |
| return AArch64::SUBXrx; |
| } |
| } |
| |
| enum AccessKind { AK_Write = 0x01, AK_Read = 0x10, AK_All = 0x11 }; |
| |
| /// True when condition flags are accessed (either by writing or reading) |
| /// on the instruction trace starting at From and ending at To. |
| /// |
/// Note: If From and To are from different blocks it's assumed the condition
/// flags are accessed on the path.
| static bool areCFlagsAccessedBetweenInstrs( |
| MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, |
| const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) { |
| // Early exit if To is at the beginning of the BB. |
| if (To == To->getParent()->begin()) |
| return true; |
| |
| // Check whether the instructions are in the same basic block |
| // If not, assume the condition flags might get modified somewhere. |
| if (To->getParent() != From->getParent()) |
| return true; |
| |
| // From must be above To. |
| assert(std::any_of( |
| ++To.getReverse(), To->getParent()->rend(), |
| [From](MachineInstr &MI) { return MI.getIterator() == From; })); |
| |
| // We iterate backward starting at \p To until we hit \p From. |
| for (const MachineInstr &Instr : |
| instructionsWithoutDebug(++To.getReverse(), From.getReverse())) { |
| if (((AccessToCheck & AK_Write) && |
| Instr.modifiesRegister(AArch64::NZCV, TRI)) || |
| ((AccessToCheck & AK_Read) && Instr.readsRegister(AArch64::NZCV, TRI))) |
| return true; |
| } |
| return false; |
| } |
| |
/// optimizePTestInstr - Attempt to remove a ptest of a predicate-generating
/// operation that could set the flags in an identical manner.
| bool AArch64InstrInfo::optimizePTestInstr( |
| MachineInstr *PTest, unsigned MaskReg, unsigned PredReg, |
| const MachineRegisterInfo *MRI) const { |
| auto *Mask = MRI->getUniqueVRegDef(MaskReg); |
| auto *Pred = MRI->getUniqueVRegDef(PredReg); |
| auto NewOp = Pred->getOpcode(); |
| bool OpChanged = false; |
| |
| unsigned MaskOpcode = Mask->getOpcode(); |
| unsigned PredOpcode = Pred->getOpcode(); |
| bool PredIsPTestLike = isPTestLikeOpcode(PredOpcode); |
| bool PredIsWhileLike = isWhileOpcode(PredOpcode); |
| |
| if (isPTrueOpcode(MaskOpcode) && (PredIsPTestLike || PredIsWhileLike)) { |
| // For PTEST(PTRUE, OTHER_INST), PTEST is redundant when PTRUE doesn't |
| // deactivate any lanes OTHER_INST might set. |
| uint64_t MaskElementSize = getElementSizeForOpcode(MaskOpcode); |
| uint64_t PredElementSize = getElementSizeForOpcode(PredOpcode); |
| |
| // Must be an all active predicate of matching element size. |
| if ((PredElementSize != MaskElementSize) || |
| (Mask->getOperand(1).getImm() != 31)) |
| return false; |
| |
    // Fall through to simply remove the PTEST.
| } else if ((Mask == Pred) && (PredIsPTestLike || PredIsWhileLike)) { |
| // For PTEST(PG, PG), PTEST is redundant when PG is the result of an |
| // instruction that sets the flags as PTEST would. |
| |
    // Fall through to simply remove the PTEST.
| } else if (PredIsPTestLike) { |
| // For PTEST(PG_1, PTEST_LIKE(PG2, ...)), PTEST is redundant when both |
| // instructions use the same predicate. |
| auto PTestLikeMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg()); |
| if (Mask != PTestLikeMask) |
| return false; |
| |
    // Fall through to simply remove the PTEST.
| } else { |
| switch (Pred->getOpcode()) { |
| case AArch64::BRKB_PPzP: |
| case AArch64::BRKPB_PPzPP: { |
| // Op 0 is chain, 1 is the mask, 2 the previous predicate to |
| // propagate, 3 the new predicate. |
| |
| // Check to see if our mask is the same as the brkpb's. If |
| // not the resulting flag bits may be different and we |
| // can't remove the ptest. |
| auto *PredMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg()); |
| if (Mask != PredMask) |
| return false; |
| |
| // Switch to the new opcode |
| NewOp = Pred->getOpcode() == AArch64::BRKB_PPzP ? AArch64::BRKBS_PPzP |
| : AArch64::BRKPBS_PPzPP; |
| OpChanged = true; |
| break; |
| } |
| case AArch64::BRKN_PPzP: { |
| auto *PredMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg()); |
| if (Mask != PredMask) |
| return false; |
| |
| NewOp = AArch64::BRKNS_PPzP; |
| OpChanged = true; |
| break; |
| } |
| case AArch64::RDFFR_PPz: { |
| // rdffr p1.b, PredMask=p0/z <--- Definition of Pred |
| // ptest Mask=p0, Pred=p1.b <--- If equal masks, remove this and use |
| // `rdffrs p1.b, p0/z` above. |
| auto *PredMask = MRI->getUniqueVRegDef(Pred->getOperand(1).getReg()); |
| if (Mask != PredMask) |
| return false; |
| |
| NewOp = AArch64::RDFFRS_PPz; |
| OpChanged = true; |
| break; |
| } |
| default: |
| // Bail out if we don't recognize the input |
| return false; |
| } |
| } |
| |
| const TargetRegisterInfo *TRI = &getRegisterInfo(); |
| |
| // If another instruction between Pred and PTest accesses flags, don't remove |
| // the ptest or update the earlier instruction to modify them. |
| if (areCFlagsAccessedBetweenInstrs(Pred, PTest, TRI)) |
| return false; |
| |
| // If we pass all the checks, it's safe to remove the PTEST and use the flags |
| // as they are prior to PTEST. Sometimes this requires the tested PTEST |
| // operand to be replaced with an equivalent instruction that also sets the |
| // flags. |
| Pred->setDesc(get(NewOp)); |
| PTest->eraseFromParent(); |
| if (OpChanged) { |
| bool succeeded = UpdateOperandRegClass(*Pred); |
| (void)succeeded; |
| assert(succeeded && "Operands have incompatible register classes!"); |
| Pred->addRegisterDefined(AArch64::NZCV, TRI); |
| } |
| |
| // Ensure that the flags def is live. |
| if (Pred->registerDefIsDead(AArch64::NZCV, TRI)) { |
| unsigned i = 0, e = Pred->getNumOperands(); |
| for (; i != e; ++i) { |
| MachineOperand &MO = Pred->getOperand(i); |
| if (MO.isReg() && MO.isDef() && MO.getReg() == AArch64::NZCV) { |
| MO.setIsDead(false); |
| break; |
| } |
| } |
| } |
| return true; |
| } |
| |
/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It is only a true compare
/// instruction when there are no uses of its destination register.
| /// |
| /// The following steps are tried in order: |
| /// 1. Convert CmpInstr into an unconditional version. |
| /// 2. Remove CmpInstr if above there is an instruction producing a needed |
| /// condition code or an instruction which can be converted into such an |
| /// instruction. |
| /// Only comparison with zero is supported. |
| bool AArch64InstrInfo::optimizeCompareInstr( |
| MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, |
| int64_t CmpValue, const MachineRegisterInfo *MRI) const { |
| assert(CmpInstr.getParent()); |
| assert(MRI); |
| |
| // Replace SUBSWrr with SUBWrr if NZCV is not used. |
| int DeadNZCVIdx = CmpInstr.findRegisterDefOperandIdx(AArch64::NZCV, true); |
| if (DeadNZCVIdx != -1) { |
| if (CmpInstr.definesRegister(AArch64::WZR) || |
| CmpInstr.definesRegister(AArch64::XZR)) { |
| CmpInstr.eraseFromParent(); |
| return true; |
| } |
| unsigned Opc = CmpInstr.getOpcode(); |
| unsigned NewOpc = convertToNonFlagSettingOpc(CmpInstr); |
| if (NewOpc == Opc) |
| return false; |
| const MCInstrDesc &MCID = get(NewOpc); |
| CmpInstr.setDesc(MCID); |
| CmpInstr.RemoveOperand(DeadNZCVIdx); |
| bool succeeded = UpdateOperandRegClass(CmpInstr); |
| (void)succeeded; |
| assert(succeeded && "Some operands reg class are incompatible!"); |
| return true; |
| } |
| |
| if (CmpInstr.getOpcode() == AArch64::PTEST_PP) |
| return optimizePTestInstr(&CmpInstr, SrcReg, SrcReg2, MRI); |
| |
| if (SrcReg2 != 0) |
| return false; |
| |
  // CmpInstr is a true compare instruction if its destination register is not
  // used.
| if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg())) |
| return false; |
| |
| if (CmpValue == 0 && substituteCmpToZero(CmpInstr, SrcReg, *MRI)) |
| return true; |
| return (CmpValue == 0 || CmpValue == 1) && |
| removeCmpToZeroOrOne(CmpInstr, SrcReg, CmpValue, *MRI); |
| } |
| |
| /// Get opcode of S version of Instr. |
| /// If Instr is S version its opcode is returned. |
| /// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have S version |
| /// or we are not interested in it. |
| static unsigned sForm(MachineInstr &Instr) { |
| switch (Instr.getOpcode()) { |
| default: |
| return AArch64::INSTRUCTION_LIST_END; |
| |
| case AArch64::ADDSWrr: |
| case AArch64::ADDSWri: |
| case AArch64::ADDSXrr: |
| case AArch64::ADDSXri: |
| case AArch64::SUBSWrr: |
| case AArch64::SUBSWri: |
| case AArch64::SUBSXrr: |
| case AArch64::SUBSXri: |
| return Instr.getOpcode(); |
| |
| case AArch64::ADDWrr: |
| return AArch64::ADDSWrr; |
| case AArch64::ADDWri: |
| return AArch64::ADDSWri; |
| case AArch64::ADDXrr: |
| return AArch64::ADDSXrr; |
| case AArch64::ADDXri: |
| return AArch64::ADDSXri; |
| case AArch64::ADCWr: |
| return AArch64::ADCSWr; |
| case AArch64::ADCXr: |
| return AArch64::ADCSXr; |
| case AArch64::SUBWrr: |
| return AArch64::SUBSWrr; |
| case AArch64::SUBWri: |
| return AArch64::SUBSWri; |
| case AArch64::SUBXrr: |
| return AArch64::SUBSXrr; |
| case AArch64::SUBXri: |
| return AArch64::SUBSXri; |
| case AArch64::SBCWr: |
| return AArch64::SBCSWr; |
| case AArch64::SBCXr: |
| return AArch64::SBCSXr; |
| case AArch64::ANDWri: |
| return AArch64::ANDSWri; |
| case AArch64::ANDXri: |
| return AArch64::ANDSXri; |
| } |
| } |
| |
| /// Check if AArch64::NZCV should be alive in successors of MBB. |
| static bool areCFlagsAliveInSuccessors(const MachineBasicBlock *MBB) { |
| for (auto *BB : MBB->successors()) |
| if (BB->isLiveIn(AArch64::NZCV)) |
| return true; |
| return false; |
| } |
| |
| /// \returns The condition code operand index for \p Instr if it is a branch |
| /// or select and -1 otherwise. |
| static int |
| findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) { |
| switch (Instr.getOpcode()) { |
| default: |
| return -1; |
| |
| case AArch64::Bcc: { |
| int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV); |
| assert(Idx >= 2); |
| return Idx - 2; |
| } |
| |
| case AArch64::CSINVWr: |
| case AArch64::CSINVXr: |
| case AArch64::CSINCWr: |
| case AArch64::CSINCXr: |
| case AArch64::CSELWr: |
| case AArch64::CSELXr: |
| case AArch64::CSNEGWr: |
| case AArch64::CSNEGXr: |
| case AArch64::FCSELSrrr: |
| case AArch64::FCSELDrrr: { |
| int Idx = Instr.findRegisterUseOperandIdx(AArch64::NZCV); |
| assert(Idx >= 1); |
| return Idx - 1; |
| } |
| } |
| } |
| |
| namespace { |
| |
| struct UsedNZCV { |
| bool N = false; |
| bool Z = false; |
| bool C = false; |
| bool V = false; |
| |
| UsedNZCV() = default; |
| |
| UsedNZCV &operator|=(const UsedNZCV &UsedFlags) { |
| this->N |= UsedFlags.N; |
| this->Z |= UsedFlags.Z; |
| this->C |= UsedFlags.C; |
| this->V |= UsedFlags.V; |
| return *this; |
| } |
| }; |
| |
| } // end anonymous namespace |
| |
| /// Find a condition code used by the instruction. |
| /// Returns AArch64CC::Invalid if either the instruction does not use condition |
| /// codes or we don't optimize CmpInstr in the presence of such instructions. |
| static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) { |
| int CCIdx = findCondCodeUseOperandIdxForBranchOrSelect(Instr); |
| return CCIdx >= 0 ? static_cast<AArch64CC::CondCode>( |
| Instr.getOperand(CCIdx).getImm()) |
| : AArch64CC::Invalid; |
| } |
| |
| static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) { |
| assert(CC != AArch64CC::Invalid); |
| UsedNZCV UsedFlags; |
| switch (CC) { |
| default: |
| break; |
| |
| case AArch64CC::EQ: // Z set |
| case AArch64CC::NE: // Z clear |
| UsedFlags.Z = true; |
| break; |
| |
| case AArch64CC::HI: // Z clear and C set |
| case AArch64CC::LS: // Z set or C clear |
| UsedFlags.Z = true; |
| LLVM_FALLTHROUGH; |
| case AArch64CC::HS: // C set |
| case AArch64CC::LO: // C clear |
| UsedFlags.C = true; |
| break; |
| |
| case AArch64CC::MI: // N set |
| case AArch64CC::PL: // N clear |
| UsedFlags.N = true; |
| break; |
| |
| case AArch64CC::VS: // V set |
| case AArch64CC::VC: // V clear |
| UsedFlags.V = true; |
| break; |
| |
| case AArch64CC::GT: // Z clear, N and V the same |
| case AArch64CC::LE: // Z set, N and V differ |
| UsedFlags.Z = true; |
| LLVM_FALLTHROUGH; |
| case AArch64CC::GE: // N and V the same |
| case AArch64CC::LT: // N and V differ |
| UsedFlags.N = true; |
| UsedFlags.V = true; |
| break; |
| } |
| return UsedFlags; |
| } |
| |
/// \returns The condition flags used after \p CmpInstr in its MachineBB if
/// they do not include the C or V flags, and NZCV flags are not alive in the
/// successors of the (common) parent block of \p CmpInstr and \p MI.
/// \returns None otherwise.
///
/// Collect the instructions using those flags in \p CCUseInstrs if provided.
| static Optional<UsedNZCV> |
| examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr, |
| const TargetRegisterInfo &TRI, |
| SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr) { |
| MachineBasicBlock *CmpParent = CmpInstr.getParent(); |
| if (MI.getParent() != CmpParent) |
| return None; |
| |
| if (areCFlagsAliveInSuccessors(CmpParent)) |
| return None; |
| |
| UsedNZCV NZCVUsedAfterCmp; |
| for (MachineInstr &Instr : instructionsWithoutDebug( |
| std::next(CmpInstr.getIterator()), CmpParent->instr_end())) { |
| if (Instr.readsRegister(AArch64::NZCV, &TRI)) { |
| AArch64CC::CondCode CC = findCondCodeUsedByInstr(Instr); |
| if (CC == AArch64CC::Invalid) // Unsupported conditional instruction |
| return None; |
| NZCVUsedAfterCmp |= getUsedNZCV(CC); |
| if (CCUseInstrs) |
| CCUseInstrs->push_back(&Instr); |
| } |
| if (Instr.modifiesRegister(AArch64::NZCV, &TRI)) |
| break; |
| } |
| if (NZCVUsedAfterCmp.C || NZCVUsedAfterCmp.V) |
| return None; |
| return NZCVUsedAfterCmp; |
| } |
| |
| static bool isADDSRegImm(unsigned Opcode) { |
| return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri; |
| } |
| |
| static bool isSUBSRegImm(unsigned Opcode) { |
| return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri; |
| } |
| |
| /// Check if CmpInstr can be substituted by MI. |
| /// |
/// CmpInstr can be substituted when:
| /// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0' |
| /// - and, MI and CmpInstr are from the same MachineBB |
| /// - and, condition flags are not alive in successors of the CmpInstr parent |
| /// - and, if MI opcode is the S form there must be no defs of flags between |
| /// MI and CmpInstr |
| /// or if MI opcode is not the S form there must be neither defs of flags |
| /// nor uses of flags between MI and CmpInstr. |
| /// - and C/V flags are not used after CmpInstr |
| static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr, |
| const TargetRegisterInfo &TRI) { |
| assert(sForm(MI) != AArch64::INSTRUCTION_LIST_END); |
| |
| const unsigned CmpOpcode = CmpInstr.getOpcode(); |
| if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode)) |
| return false; |
| |
| if (!examineCFlagsUse(MI, CmpInstr, TRI)) |
| return false; |
| |
| AccessKind AccessToCheck = AK_Write; |
| if (sForm(MI) != MI.getOpcode()) |
| AccessToCheck = AK_All; |
| return !areCFlagsAccessedBetweenInstrs(&MI, &CmpInstr, &TRI, AccessToCheck); |
| } |
| |
| /// Substitute an instruction comparing to zero with another instruction |
/// which produces the needed condition flags.
| /// |
| /// Return true on success. |
| bool AArch64InstrInfo::substituteCmpToZero( |
| MachineInstr &CmpInstr, unsigned SrcReg, |
| const MachineRegisterInfo &MRI) const { |
| // Get the unique definition of SrcReg. |
| MachineInstr *MI = MRI.getUniqueVRegDef(SrcReg); |
| if (!MI) |
| return false; |
| |
| const TargetRegisterInfo &TRI = getRegisterInfo(); |
| |
| unsigned NewOpc = sForm(*MI); |
| if (NewOpc == AArch64::INSTRUCTION_LIST_END) |
| return false; |
| |
| if (!canInstrSubstituteCmpInstr(*MI, CmpInstr, TRI)) |
| return false; |
| |
| // Update the instruction to set NZCV. |
| MI->setDesc(get(NewOpc)); |
| CmpInstr.eraseFromParent(); |
| bool succeeded = UpdateOperandRegClass(*MI); |
| (void)succeeded; |
| assert(succeeded && "Some operands reg class are incompatible!"); |
| MI->addRegisterDefined(AArch64::NZCV, &TRI); |
| return true; |
| } |
| |
| /// \returns True if \p CmpInstr can be removed. |
| /// |
| /// \p IsInvertCC is true if, after removing \p CmpInstr, condition |
| /// codes used in \p CCUseInstrs must be inverted. |
| static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr, |
| int CmpValue, const TargetRegisterInfo &TRI, |
| SmallVectorImpl<MachineInstr *> &CCUseInstrs, |
| bool &IsInvertCC) { |
| assert((CmpValue == 0 || CmpValue == 1) && |
| "Only comparisons to 0 or 1 considered for removal!"); |
| |
| // MI is 'CSINCWr %vreg, wzr, wzr, <cc>' or 'CSINCXr %vreg, xzr, xzr, <cc>' |
| unsigned MIOpc = MI.getOpcode(); |
| if (MIOpc == AArch64::CSINCWr) { |
| if (MI.getOperand(1).getReg() != AArch64::WZR || |
| MI.getOperand(2).getReg() != AArch64::WZR) |
| return false; |
| } else if (MIOpc == AArch64::CSINCXr) { |
| if (MI.getOperand(1).getReg() != AArch64::XZR || |
| MI.getOperand(2).getReg() != AArch64::XZR) |
| return false; |
| } else { |
| return false; |
| } |
| AArch64CC::CondCode MICC = findCondCodeUsedByInstr(MI); |
| if (MICC == AArch64CC::Invalid) |
| return false; |
| |
| // NZCV needs to be defined |
| if (MI.findRegisterDefOperandIdx(AArch64::NZCV, true) != -1) |
| return false; |
| |
| // CmpInstr is 'ADDS %vreg, 0' or 'SUBS %vreg, 0' or 'SUBS %vreg, 1' |
| const unsigned CmpOpcode = CmpInstr.getOpcode(); |
| bool IsSubsRegImm = isSUBSRegImm(CmpOpcode); |
| if (CmpValue && !IsSubsRegImm) |
| return false; |
| if (!CmpValue && !IsSubsRegImm && !isADDSRegImm(CmpOpcode)) |
| return false; |
| |
| // MI conditions allowed: eq, ne, mi, pl |
| UsedNZCV MIUsedNZCV = getUsedNZCV(MICC); |
| if (MIUsedNZCV.C || MIUsedNZCV.V) |
| return false; |
| |
| Optional<UsedNZCV> NZCVUsedAfterCmp = |
| examineCFlagsUse(MI, CmpInstr, TRI, &CCUseInstrs); |
| // Condition flags are not used in the successors of CmpInstr's basic block, |
| // and only the Z or N flags may be used after CmpInstr within its block. |
| if (!NZCVUsedAfterCmp) |
| return false; |
| // Z or N flag used after CmpInstr must correspond to the flag used in MI |
| if ((MIUsedNZCV.Z && NZCVUsedAfterCmp->N) || |
| (MIUsedNZCV.N && NZCVUsedAfterCmp->Z)) |
| return false; |
| // If CmpInstr is comparison to zero MI conditions are limited to eq, ne |
| if (MIUsedNZCV.N && !CmpValue) |
| return false; |
| |
| // There must be no defs of flags between MI and CmpInstr |
| if (areCFlagsAccessedBetweenInstrs(&MI, &CmpInstr, &TRI, AK_Write)) |
| return false; |
| |
| // Condition code is inverted in the following cases: |
| // 1. MI condition is ne; CmpInstr is 'ADDS %vreg, 0' or 'SUBS %vreg, 0' |
| // 2. MI condition is eq, pl; CmpInstr is 'SUBS %vreg, 1' |
| IsInvertCC = (CmpValue && (MICC == AArch64CC::EQ || MICC == AArch64CC::PL)) || |
| (!CmpValue && MICC == AArch64CC::NE); |
| return true; |
| } |
| |
| /// Remove comparison in csinc-cmp sequence. |
| /// |
| /// Examples: |
| /// 1. \code |
| /// csinc w9, wzr, wzr, ne |
| /// cmp w9, #0 |
| /// b.eq |
| /// \endcode |
| /// to |
| /// \code |
| /// csinc w9, wzr, wzr, ne |
| /// b.ne |
| /// \endcode |
| /// |
| /// 2. \code |
| /// csinc x2, xzr, xzr, mi |
| /// cmp x2, #1 |
| /// b.pl |
| /// \endcode |
| /// to |
| /// \code |
| /// csinc x2, xzr, xzr, mi |
| /// b.pl |
| /// \endcode |
| /// |
| /// \param CmpInstr comparison instruction |
| /// \param SrcReg register whose value is compared against \p CmpValue |
| /// \param CmpValue immediate value compared against (0 or 1) |
| /// \param MRI machine register info used to find the definition of \p SrcReg |
| /// \return True when the comparison was removed |
| bool AArch64InstrInfo::removeCmpToZeroOrOne( |
| MachineInstr &CmpInstr, unsigned SrcReg, int CmpValue, |
| const MachineRegisterInfo &MRI) const { |
| MachineInstr *MI = MRI.getUniqueVRegDef(SrcReg); |
| if (!MI) |
| return false; |
| const TargetRegisterInfo &TRI = getRegisterInfo(); |
| SmallVector<MachineInstr *, 4> CCUseInstrs; |
| bool IsInvertCC = false; |
| if (!canCmpInstrBeRemoved(*MI, CmpInstr, CmpValue, TRI, CCUseInstrs, |
| IsInvertCC)) |
| return false; |
| // Make transformation |
| CmpInstr.eraseFromParent(); |
| if (IsInvertCC) { |
| // Invert condition codes in CmpInstr CC users |
| for (MachineInstr *CCUseInstr : CCUseInstrs) { |
| int Idx = findCondCodeUseOperandIdxForBranchOrSelect(*CCUseInstr); |
| assert(Idx >= 0 && "Unexpected instruction using CC."); |
| MachineOperand &CCOperand = CCUseInstr->getOperand(Idx); |
| AArch64CC::CondCode CCUse = AArch64CC::getInvertedCondCode( |
| static_cast<AArch64CC::CondCode>(CCOperand.getImm())); |
| CCOperand.setImm(CCUse); |
| } |
| } |
| return true; |
| } |
| |
| bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { |
| if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD && |
| MI.getOpcode() != AArch64::CATCHRET) |
| return false; |
| |
| MachineBasicBlock &MBB = *MI.getParent(); |
| auto &Subtarget = MBB.getParent()->getSubtarget<AArch64Subtarget>(); |
| auto TRI = Subtarget.getRegisterInfo(); |
| DebugLoc DL = MI.getDebugLoc(); |
| |
| if (MI.getOpcode() == AArch64::CATCHRET) { |
| // Skip to the first instruction before the epilog. |
| const TargetInstrInfo *TII = |
| MBB.getParent()->getSubtarget().getInstrInfo(); |
| MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB(); |
| auto MBBI = MachineBasicBlock::iterator(MI); |
| MachineBasicBlock::iterator FirstEpilogSEH = std::prev(MBBI); |
| while (FirstEpilogSEH->getFlag(MachineInstr::FrameDestroy) && |
| FirstEpilogSEH != MBB.begin()) |
| FirstEpilogSEH = std::prev(FirstEpilogSEH); |
| if (FirstEpilogSEH != MBB.begin()) |
| FirstEpilogSEH = std::next(FirstEpilogSEH); |
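| // Materialize the address of the catchret target block in x0 just before |
| // the epilog, roughly (illustrative): |
| //   adrp x0, <target block> |
| //   add  x0, x0, :lo12:<target block> |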
| BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADRP)) |
| .addReg(AArch64::X0, RegState::Define) |
| .addMBB(TargetMBB); |
| BuildMI(MBB, FirstEpilogSEH, DL, TII->get(AArch64::ADDXri)) |
| .addReg(AArch64::X0, RegState::Define) |
| .addReg(AArch64::X0) |
| .addMBB(TargetMBB) |
| .addImm(0); |
| return true; |
| } |
| |
| Register Reg = MI.getOperand(0).getReg(); |
| Module &M = *MBB.getParent()->getFunction().getParent(); |
| if (M.getStackProtectorGuard() == "sysreg") { |
| const AArch64SysReg::SysReg *SrcReg = |
| AArch64SysReg::lookupSysRegByName(M.getStackProtectorGuardReg()); |
| if (!SrcReg) |
| report_fatal_error("Unknown SysReg for Stack Protector Guard Register"); |
| |
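| // Expand the pseudo into an MRS of the configured system register followed |
| // by a load of the guard value at the configured offset, e.g. (illustrative, |
| // assuming sp_el0 with offset 40): |
| //   mrs x0, SP_EL0 |
| //   ldr x0, [x0, #40] |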
| // mrs xN, sysreg |
| BuildMI(MBB, MI, DL, get(AArch64::MRS)) |
| .addDef(Reg, RegState::Renamable) |
| .addImm(SrcReg->Encoding); |
| int Offset = M.getStackProtectorGuardOffset(); |
| if (Offset >= 0 && Offset <= 32760 && Offset % 8 == 0) { |
| // ldr xN, [xN, #offset] |
| BuildMI(MBB, MI, DL, get(AArch64::LDRXui)) |
| .addDef(Reg) |
| .addUse(Reg, RegState::Kill) |
| .addImm(Offset / 8); |
| } else if (Offset >= -256 && Offset <= 255) { |
| // ldur xN, [xN, #offset] |
| BuildMI(MBB, MI, DL, get(AArch64::LDURXi)) |
| .addDef(Reg) |
| .addUse(Reg, RegState::Kill) |
| .addImm(Offset); |
| } else if (Offset >= -4095 && Offset <= 4095) { |
| if (Offset > 0) { |
| // add xN, xN, #offset |
| BuildMI(MBB, MI, DL, get(AArch64::ADDXri)) |
| .addDef(Reg) |
| .addUse(Reg, RegState::Kill) |
| .addImm(Offset) |
| .addImm(0); |
| } else { |
| // sub xN, xN, #offset |
| BuildMI(MBB, MI, DL, get(AArch64::SUBXri)) |
| .addDef(Reg) |
| .addUse(Reg, RegState::Kill) |
| .addImm(-Offset) |
| .addImm(0); |
| } |
| // ldr xN, [xN] |
| BuildMI(MBB, MI, DL, get(AArch64::LDRXui)) |
| .addDef(Reg) |
| .addUse(Reg, RegState::Kill) |
| .addImm(0); |
| } else { |
| // Cases where the offset is outside +/- 4095 and either not a non-negative |
| // multiple of 8 or larger than 32760 (the limit of the scaled LDRXui form |
| // handled above). |
| // It might be nice to use AArch64::MOVi32imm here, which would get |
| // expanded in PreSched2 after PostRA, but our lone scratch Reg already |
| // contains the MRS result. findScratchNonCalleeSaveRegister() in |
| // AArch64FrameLowering might help us find such a scratch register |
| // though. If we failed to find a scratch register, we could emit a |
| // stream of add instructions to build up the immediate. Or, we could try |
| // to insert a AArch64::MOVi32imm before register allocation so that we |
| // didn't need to scavenge for a scratch register. |
| report_fatal_error("Unable to encode Stack Protector Guard Offset"); |
| } |
| MBB.erase(MI); |
| return true; |
| } |
| |
| const GlobalValue *GV = |
| cast<GlobalValue>((*MI.memoperands_begin())->getValue()); |
| const TargetMachine &TM = MBB.getParent()->getTarget(); |
| unsigned OpFlags = Subtarget.ClassifyGlobalReference(GV, TM); |
| const unsigned char MO_NC = AArch64II::MO_NC; |
| |
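| // Depending on how the guard global is classified, the pseudo becomes a GOT |
| // access, a MOVZ/MOVK literal (large code model), an ADR (tiny code model), |
| // or an ADRP plus page offset. For the common GOT case the result is roughly |
| // (illustrative): |
| //   adrp x0, :got:__stack_chk_guard |
| //   ldr  x0, [x0, :got_lo12:__stack_chk_guard] |
| //   ldr  x0, [x0] |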
| if ((OpFlags & AArch64II::MO_GOT) != 0) { |
| BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg) |
| .addGlobalAddress(GV, 0, OpFlags); |
| if (Subtarget.isTargetILP32()) { |
| unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32); |
| BuildMI(MBB, MI, DL, get(AArch64::LDRWui)) |
| .addDef(Reg32, RegState::Dead) |
| .addUse(Reg, RegState::Kill) |
| .addImm(0) |
| .addMemOperand(*MI.memoperands_begin()) |
| .addDef(Reg, RegState::Implicit); |
| } else { |
| BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg) |
| .addReg(Reg, RegState::Kill) |
| .addImm(0) |
| .addMemOperand(*MI.memoperands_begin()); |
| } |
| } else if (TM.getCodeModel() == CodeModel::Large) { |
| assert(!Subtarget.isTargetILP32() && "how can large exist in ILP32?"); |
| BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg) |
| .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC) |
| .addImm(0); |
| BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg) |
| .addReg(Reg, RegState::Kill) |
| .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC) |
| .addImm(16); |
| BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg) |
| .addReg(Reg, RegState::Kill) |
| .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC) |
| .addImm(32); |
| BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg) |
| .addReg(Reg, RegState::Kill) |
| .addGlobalAddress(GV, 0, AArch64II::MO_G3) |
| .addImm(48); |
| BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg) |
| .addReg(Reg, RegState::Kill) |
| .addImm(0) |
| .addMemOperand(*MI.memoperands_begin()); |
| } else if (TM.getCodeModel() == CodeModel::Tiny) { |
| BuildMI(MBB, MI, DL, get(AArch64::ADR), Reg) |
| .addGlobalAddress(GV, 0, OpFlags); |
| } else { |
| BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg) |
| .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE); |
| unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC; |
| if (Subtarget.isTargetILP32()) { |
| unsigned Reg32 = TRI->getSubReg(Reg, AArch64::sub_32); |
| BuildMI(MBB, MI, DL, get(AArch64::LDRWui)) |
| .addDef(Reg32, RegState::Dead) |
| .addUse(Reg, RegState::Kill) |
| .addGlobalAddress(GV, 0, LoFlags) |
| .addMemOperand(*MI.memoperands_begin()) |
| .addDef(Reg, RegState::Implicit); |
| } else { |
| BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg) |
| .addReg(Reg, RegState::Kill) |
| .addGlobalAddress(GV, 0, LoFlags) |
| .addMemOperand(*MI.memoperands_begin()); |
| } |
| } |
| |
| MBB.erase(MI); |
| |
| return true; |
| } |
| |
| // Return true if this instruction simply sets its single destination register |
| // to zero. This is equivalent to a register rename of the zero-register. |
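| // For example (illustrative): 'movz w0, #0', 'and w0, wzr, #imm', or a COPY |
| // from WZR all qualify. |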
| bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) { |
| switch (MI.getOpcode()) { |
| default: |
| break; |
| case AArch64::MOVZWi: |
| case AArch64::MOVZXi: // movz Rd, #0 (LSL #0) |
| if (MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) { |
| assert(MI.getDesc().getNumOperands() == 3 && |
| MI.getOperand(2).getImm() == 0 && "invalid MOVZi operands"); |
| return true; |
| } |
| break; |
| case AArch64::ANDWri: // and Rd, Rzr, #imm |
| return MI.getOperand(1).getReg() == AArch64::WZR; |
| case AArch64::ANDXri: |
| return MI.getOperand(1).getReg() == AArch64::XZR; |
| case TargetOpcode::COPY: |
| return MI.getOperand(1).getReg() == AArch64::WZR; |
| } |
| return false; |
| } |
| |
| // Return true if this instruction simply renames a general register without |
| // modifying bits. |
| bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) { |
| switch (MI.getOpcode()) { |
| default: |
| break; |
| case TargetOpcode::COPY: { |
| // GPR32 copies will be lowered to ORRXrs |
| Register DstReg = MI.getOperand(0).getReg(); |
| return (AArch64::GPR32RegClass.contains(DstReg) || |
| AArch64::GPR64RegClass.contains(DstReg)); |
| } |
| case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0) |
| if (MI.getOperand(1).getReg() == AArch64::XZR) { |
| assert(MI.getDesc().getNumOperands() == 4 && |
| MI.getOperand(3).getImm() == 0 && "invalid ORRrs operands"); |
| return true; |
| } |
| break; |
| case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0) |
| if (MI.getOperand(2).getImm() == 0) { |
| assert(MI.getDesc().getNumOperands() == 4 && |
| MI.getOperand(3).getImm() == 0 && "invalid ADDXri operands"); |
| return true; |
| } |
| break; |
| } |
| return false; |
| } |
| |
| // Return true if this instruction simply renames a floating-point register |
| // without modifying bits. |
| bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) { |
| switch (MI.getOpcode()) { |
| default: |
| break; |
| case TargetOpcode::COPY: { |
| Register DstReg = MI.getOperand(0).getReg(); |
| return AArch64::FPR128RegClass.contains(DstReg); |
| } |
| case AArch64::ORRv16i8: |
| if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) { |
| assert(MI.getDesc().getNumOperands() == 3 && MI.getOperand(0).isReg() && |
| "invalid ORRv16i8 operands"); |
| return true; |
| } |
| break; |
| } |
| return false; |
| } |
| |
| unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI, |
| int &FrameIndex) const { |
| switch (MI.getOpcode()) { |
| default: |
| break; |
| case AArch64::LDRWui: |
| case AArch64::LDRXui: |
| case AArch64::LDRBui: |
| case AArch64::LDRHui: |
| case AArch64::LDRSui: |
| case AArch64::LDRDui: |
| case AArch64::LDRQui: |
| if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() && |
| MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) { |
| FrameIndex = MI.getOperand(1).getIndex(); |
| return MI.getOperand(0).getReg(); |
| } |
| break; |
| } |
| |
| return 0; |
| } |
| |
| unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI, |
| int &FrameIndex) const { |
| switch (MI.getOpcode()) { |
| default: |
| break; |
| case AArch64::STRWui: |
| case AArch64::STRXui: |
| case AArch64::STRBui: |
| case AArch64::STRHui: |
| case AArch64::STRSui: |
| case AArch64::STRDui: |
| case AArch64::STRQui: |
| case AArch64::LDR_PXI: |
| case AArch64::STR_PXI: |
| if (MI.getOperand(0).getSubReg() == 0 && MI.getOperand(1).isFI() && |
| MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) { |
| FrameIndex = MI.getOperand(1).getIndex(); |
| return MI.getOperand(0).getReg(); |
| } |
| break; |
| } |
| return 0; |
| } |
| |
| /// Check all MachineMemOperands for a hint to suppress pairing. |
| bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) { |
| return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) { |
| return MMO->getFlags() & MOSuppressPair; |
| }); |
| } |
| |
| /// Set a flag on the first MachineMemOperand to suppress pairing. |
| void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) { |
| if (MI.memoperands_empty()) |
| return; |
| (*MI.memoperands_begin())->setFlags(MOSuppressPair); |
| } |
| |
| /// Check all MachineMemOperands for a hint that the load/store is strided. |
| bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) { |
| return llvm::any_of(MI.memoperands(), [](MachineMemOperand *MMO) { |
| return MMO->getFlags() & MOStridedAccess; |
| }); |
| } |
| |
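| // Return true if Opc is an unscaled (LDUR*/STUR*) or pre-indexed load/store, |
| // i.e. one whose immediate offset is a byte offset rather than being scaled |
| // by the access size (illustrative examples: LDURXi, STRQpre). |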
| bool AArch64InstrInfo::hasUnscaledLdStOffset(unsigned Opc) { |
| switch (Opc) { |
| default: |
| return false; |
| case AArch64::STURSi: |
| case AArch64::STRSpre: |
| case AArch64::STURDi: |
| case AArch64::STRDpre: |
| case AArch64::STURQi: |
| case AArch64::STRQpre: |
| case AArch64::STURBBi: |
| case AArch64::STURHHi: |
| case AArch64::STURWi: |
| case AArch64::STRWpre: |
| case AArch64::STURXi: |
| case AArch64::STRXpre: |
| case AArch64::LDURSi: |
| case AArch64::LDRSpre: |
| case AArch64::LDURDi: |
| case AArch64::LDRDpre: |
| case AArch64::LDURQi: |
| case AArch64::LDRQpre: |
| case AArch64::LDURWi: |
| case AArch64::LDRWpre: |
| case AArch64::LDURXi: |
| case AArch64::LDRXpre: |
| case AArch64::LDURSWi: |
| case AArch64::LDURHHi: |
| case AArch64::LDURBBi: |
| case AArch64::LDURSBWi: |
| case AArch64::LDURSHWi: |
| return true; |
| } |
| } |
| |
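| // Map a scaled load/store opcode to its unscaled LDUR*/STUR* counterpart, if |
| // one exists; e.g. LDRXui -> LDURXi (illustrative). Returns None for opcodes |
| // without an unscaled form. |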
| Optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) { |
| switch (Opc) { |
| default: return {}; |
| case AArch64::PRFMui: return AArch64::PRFUMi; |
| case AArch64::LDRXui: return AArch64::LDURXi; |
| case AArch64::LDRWui: return AArch64::LDURWi; |
| case AArch64::LDRBui: return AArch64::LDURBi; |
| case AArch64::LDRHui: return AArch64::LDURHi; |
| case AArch64::LDRSui: return AArch64::LDURSi; |
| case AArch64::LDRDui: return AArch64::LDURDi; |
| case AArch64::LDRQui: return AArch64::LDURQi; |
| case AArch64::LDRBBui: return AArch64::LDURBBi; |
| case AArch64::LDRHHui: return AArch64::LDURHHi; |
| case AArch64::LDRSBXui: return AArch64::LDURSBXi; |
| case AArch64::LDRSBWui: return AArch64::LDURSBWi; |
| case AArch64::LDRSHXui: return AArch64::LDURSHXi; |
| case AArch64::LDRSHWui: return AArch64::LDURSHWi; |
| case AArch64::LDRSWui: return AArch64::LDURSWi; |
| case AArch64::STRXui: return AArch64::STURXi; |
| case AArch64::STRWui: return AArch64::STURWi; |
| case AArch64::STRBui: return AArch64::STURBi; |
| case AArch64::STRHui: return AArch64::STURHi; |
| case AArch64::STRSui: return AArch64::STURSi; |
| case AArch64::STRDui: return AArch64::STURDi; |
| case AArch64::STRQui: return AArch64::STURQi; |
| case AArch64::STRBBui: return AArch64::STURBBi; |
| case AArch64::STRHHui: return AArch64::STURHHi; |
| } |
| } |
| |
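| // Return the operand index of the immediate offset. Most loads/stores keep |
| // it at operand 2 (e.g. 'ldr x0, [x1, #8]'), while the paired and SVE forms |
| // listed below keep it at operand 3 (e.g. 'ldp x0, x1, [x2, #16]'); the |
| // examples are illustrative. |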
| unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) { |
| switch (Opc) { |
| default: |
| return 2; |
| case AArch64::LDPXi: |
| case AArch64::LDPDi: |
| case AArch64::STPXi: |
| case AArch64::STPDi: |
| case AArch64::LDNPXi: |
| case AArch64::LDNPDi: |
| case AArch64::STNPXi: |
| case AArch64::STNPDi: |
| case AArch64::LDPQi: |
| case AArch64::STPQi: |
| case AArch64::LDNPQi: |
| case AArch64::STNPQi: |
| case AArch64::LDPWi: |
| case AArch64::LDPSi: |
| case AArch64::STPWi: |
| case AArch64::STPSi: |
| case AArch64::LDNPWi: |
| case AArch64::LDNPSi: |
| case AArch64::STNPWi: |
| case AArch64::STNPSi: |
| case AArch64::LDG: |
| case AArch64::STGPi: |
| |
| case AArch64::LD1B_IMM: |
| case AArch64::LD1B_H_IMM: |
| case AArch64::LD1B_S_IMM: |
| case AArch64::LD1B_D_IMM: |
| case AArch64::LD1SB_H_IMM: |
| case AArch64::LD1SB_S_IMM: |
| case AArch64::LD1SB_D_IMM: |
| case AArch64::LD1H_IMM: |
| case AArch64::LD1H_S_IMM: |
| case AArch64::LD1H_D_IMM: |
| case AArch64::LD1SH_S_IMM: |
| case AArch64::LD1SH_D_IMM: |
| case AArch64::LD1W_IMM: |
| case AArch64::LD1W_D_IMM: |
| case AArch64::LD1SW_D_IMM: |
| case AArch64::LD1D_IMM: |
| |
| case AArch64::ST1B_IMM: |
| case AArch64::ST1B_H_IMM: |
| case AArch64::ST1B_S_IMM: |
| case AArch64::ST1B_D_IMM: |
| case AArch64::ST1H_IMM: |
| case AArch64::ST1H_S_IMM: |
| case AArch64::ST1H_D_IMM: |
| case AArch64::ST1W_IMM: |
| case AArch64::ST1W_D_IMM: |
| case AArch64::ST1D_IMM: |
| |
| case AArch64::LD1RB_IMM: |
| case AArch64::LD1RB_H_IMM: |
| case AArch64::LD1RB_S_IMM: |
| case AArch64::LD1RB_D_IMM: |
| case AArch64::LD1RSB_H_IMM: |
| case AArch64::LD1RSB_S_IMM: |
| case AArch64::LD1RSB_D_IMM: |
| case AArch64::LD1RH_IMM: |
| case AArch64::LD1RH_S_IMM: |
| case AArch64::LD1RH_D_IMM: |
| case AArch64::LD1RSH_S_IMM: |
| case AArch64::LD1RSH_D_IMM: |
| case AArch64::LD1RW_IMM: |
| case AArch64::LD1RW_D_IMM: |
| case AArch64::LD1RSW_IMM: |
| case AArch64::LD1RD_IMM: |
| |
| case AArch64::LDNT1B_ZRI: |
| case AArch64::LDNT1H_ZRI: |
| case AArch64::LDNT1W_ZRI: |
| case AArch64::LDNT1D_ZRI: |
| case AArch64::STNT1B_ZRI: |
| case AArch64::STNT1H_ZRI: |
| case AArch64::STNT1W_ZRI: |
| case AArch64::STNT1D_ZRI: |
| |
| case AArch64::LDNF1B_IMM: |
| case AArch64::LDNF1B_H_IMM: |
| case AArch64::LDNF1B_S_IMM: |
| case AArch64::LDNF1B_D_IMM: |
| case AArch64::LDNF1SB_H_IMM: |
| case AArch64::LDNF1SB_S_IMM: |
| case AArch64::LDNF1SB_D_IMM: |
| case AArch64::LDNF1H_IMM: |
| case AArch64::LDNF1H_S_IMM: |
| case AArch64::LDNF1H_D_IMM: |
| case AArch64::LDNF1SH_S_IMM: |
| case AArch64::LDNF1SH_D_IMM: |
| case AArch64::LDNF1W_IMM: |
| case AArch64::LDNF1W_D_IMM: |
| case AArch64::LDNF1SW_D_IMM: |
| case AArch64::LDNF1D_IMM: |
| return 3; |
| case AArch64::ADDG: |
| case AArch64::STGOffset: |
| case AArch64::LDR_PXI: |
| case AArch64::STR_PXI: |
| return 2; |
| } |
| } |
| |
| bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) { |
| switch (MI.getOpcode()) { |
| default: |
| return false; |
| // Scaled instructions. |
| case AArch64::STRSui: |
| case AArch64::STRDui: |
| case AArch64::STRQui: |
| case AArch64::STRXui: |
| case AArch64::STRWui: |
| case AArch64::LDRSui: |
| case AArch64::LDRDui: |
| case AArch64::LDRQui: |
| case AArch64::LDRXui: |
| case AArch64::LDRWui: |
| case AArch64::LDRSWui: |
| // Unscaled instructions. |
| case AArch64::STURSi: |
| case AArch64::STRSpre: |
| case AArch64::STURDi: |
| case AArch64::STRDpre: |
| case AArch64::STURQi: |
| case AArch64::STRQpre: |
| case AArch64::STURWi: |
| case AArch64::STRWpre: |
| case AArch64::STURXi: |
| case AArch64::STRXpre: |
| case AArch64::LDURSi: |
| case AArch64::LDRSpre: |
| case AArch64::LDURDi: |
| case AArch64::LDRDpre: |
| case AArch64::LDURQi: |
| case AArch64::LDRQpre: |
| case AArch64::LDURWi: |
| case AArch64::LDRWpre: |
| case AArch64::LDURXi: |
| case AArch64::LDRXpre: |
| case AArch64::LDURSWi: |
| return true; |
| } |
| } |
| |
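| // Return the flag-setting (S) variant of the given opcode and report through |
| // Is64Bit whether that variant operates on 64-bit registers, e.g. ADDWri -> |
| // ADDSWri (32-bit) and SUBXrs -> SUBSXrs (64-bit); examples are illustrative. |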
| unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc, |
| bool &Is64Bit) { |
| switch (Opc) { |
| default: |
| llvm_unreachable("Opcode has no flag setting equivalent!"); |
| // 32-bit cases: |
| case AArch64::ADDWri: |
| Is64Bit = false; |
| return AArch64::ADDSWri; |
| case AArch64::ADDWrr: |
| Is64Bit = false; |
| return AArch64::ADDSWrr; |
| case AArch64::ADDWrs: |
| Is64Bit = false; |
| return AArch64::ADDSWrs; |
| case AArch64::ADDWrx: |
| Is64Bit = false; |
| return AArch64::ADDSWrx; |
| case AArch64::ANDWri: |
| Is64Bit = false; |
| return AArch64::ANDSWri; |
| case AArch64::ANDWrr: |
| Is64Bit = false; |
| return AArch64::ANDSWrr; |
| case AArch64::ANDWrs: |
| Is64Bit = false; |
| return AArch64::ANDSWrs; |
| case AArch64::BICWrr: |
| Is64Bit = false; |
| return AArch64::BICSWrr; |
| case AArch64::BICWrs: |
| Is64Bit = false; |
| return AArch64::BICSWrs; |
| case AArch64::SUBWri: |
| Is64Bit = false; |
| return AArch64::SUBSWri; |
| case AArch64::SUBWrr: |
| Is64Bit = false; |
| return AArch64::SUBSWrr; |
| case AArch64::SUBWrs: |
| Is64Bit = false; |
| return AArch64::SUBSWrs; |
| case AArch64::SUBWrx: |
| Is64Bit = false; |
| return AArch64::SUBSWrx; |
| // 64-bit cases: |
| case AArch64::ADDXri: |
| Is64Bit = true; |
| return AArch64::ADDSXri; |
| case AArch64::ADDXrr: |
| Is64Bit = true; |
| return AArch64::ADDSXrr; |
| case AArch64::ADDXrs: |
| Is64Bit = true; |
| return AArch64::ADDSXrs; |
| case AArch64::ADDXrx: |
| Is64Bit = true; |
| return AArch64::ADDSXrx; |
| case AArch64::ANDXri: |
| Is64Bit = true; |
| return AArch64::ANDSXri; |
| case AArch64::ANDXrr: |
| Is64Bit = true; |
| return AArch64::ANDSXrr; |
| case AArch64::ANDXrs: |
| Is64Bit = true; |
| return AArch64::ANDSXrs; |
| case AArch64::BICXrr: |
| Is64Bit = true; |
| return AArch64::BICSXrr; |
| case AArch64::BICXrs: |
| Is64Bit = true; |
| return AArch64::BICSXrs; |
| case AArch64::SUBXri: |
| Is64Bit = true; |
| return AArch64::SUBSXri; |
| case AArch64::SUBXrr: |
| Is64Bit = true; |
| return AArch64::SUBSXrr; |
| case AArch64::SUBXrs: |
| Is64Bit = true; |
| return AArch64::SUBSXrs; |
| case AArch64::SUBXrx: |
| Is64Bit = true; |
| return AArch64::SUBSXrx; |
| } |
| } |
| |
| // Is this a candidate for ld/st merging or pairing? For example, we don't |
| // touch volatiles or load/stores that have a hint to avoid pair formation. |
| bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const { |
| |
| bool IsPreLdSt = isPreLdSt(MI); |
| |
| // If this is a volatile load/store, don't mess with it. |
| if (MI.hasOrderedMemoryRef()) |
| return false; |
| |
| // Make sure this is a reg/fi+imm (as opposed to an address reloc). |
| // For Pre-inc LD/ST, the operand is shifted by one. |
| assert((MI.getOperand(IsPreLdSt ? 2 : 1).isReg() || |
| MI.getOperand(IsPreLdSt ? 2 : 1).isFI()) && |
| "Expected a reg or frame index operand."); |
| |
| // For Pre-indexed addressing quadword instructions, the third operand is the |
| // immediate value. |
| bool IsImmPreLdSt = IsPreLdSt && MI.getOperand(3).isImm(); |
| |
| if (!MI.getOperand(2).isImm() && !IsImmPreLdSt) |
| return false; |
| |
| // Can't merge/pair if the instruction modifies the base register. |
| // e.g., ldr x0, [x0] |
| // This case will never occur with an FI base. |
| // However, if the instruction is an LDR/STR<S,D,Q,W,X>pre, it can be merged. |
| // For example: |
| // ldr q0, [x11, #32]! |
| // ldr q1, [x11, #16] |
| // to |
| // ldp q0, q1, [x11, #32]! |
| if (MI.getOperand(1).isReg() && !IsPreLdSt) { |
| Register BaseReg = MI.getOperand(1).getReg(); |
| const TargetRegisterInfo *TRI = &getRegisterInfo(); |
| if (MI.modifiesRegister(BaseReg, TRI)) |
| return false; |
| } |
| |
| // Check if this load/store has a hint to avoid pair formation. |
| // MachineMemOperands hints are set by the AArch64StorePairSuppress pass. |
| if (isLdStPairSuppressed(MI)) |
| return false; |
| |
| // Do not pair any callee-save store/reload instructions in the |
| // prologue/epilogue if the CFI information encoded the operations as separate |
| // instructions, as that would make the size of the actual prologue differ |
| // from the prologue size recorded in the Windows CFI. |
| const MCAsmInfo *MAI = MI.getMF()->getTarget().getMCAsmInfo(); |
| bool NeedsWinCFI = MAI->usesWindowsCFI() && |
| MI.getMF()->getFunction().needsUnwindTableEntry(); |
| if (NeedsWinCFI && (MI.getFlag(MachineInstr::FrameSetup) || |
| MI.getFlag(MachineInstr::FrameDestroy))) |
| return false; |
| |
| // On some CPUs quad load/store pairs are slower than two single load/stores. |
| if (Subtarget.isPaired128Slow()) { |
| switch (MI.getOpcode()) { |
| default: |
| break; |
| case AArch64::LDURQi: |
| case AArch64::STURQi: |
| case AArch64::LDRQui: |
| case AArch64::STRQui: |
| return false; |
| } |
| } |
| |
| return true; |
| } |
| |
| bool AArch64InstrInfo::getMemOperandsWithOffsetWidth( |
| const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, |
| int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, |
| const TargetRegisterInfo *TRI) const { |
| if (!LdSt.mayLoadOrStore()) |
| return false; |
| |
| const MachineOperand *BaseOp; |
| if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, OffsetIsScalable, |
| Width, TRI)) |
| return false; |
| BaseOps.push_back(BaseOp); |
| return true; |
| } |
| |
| Optional<ExtAddrMode> |
| AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI, |
| const TargetRegisterInfo *TRI) const { |
| const MachineOperand *Base; // Filled with the base operand of MI. |
| int64_t Offset; // Filled with the offset of MI. |
| bool OffsetIsScalable; |
| if (!getMemOperandWithOffset(MemI, Base, Offset, OffsetIsScalable, TRI)) |
| return None; |
| |
| if (!Base->isReg()) |
| return None; |
| ExtAddrMode AM; |
| AM.BaseReg = Base->getReg(); |
| AM.Displacement = Offset; |
| AM.ScaledReg = 0; |
| return AM; |
| } |
| |
| bool AArch64InstrInfo::getMemOperandWithOffsetWidth( |
| const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, |
| bool &OffsetIsScalable, unsigned &Width, |
| const TargetRegisterInfo *TRI) const { |
| assert(LdSt.mayLoadOrStore() && "Expected a memory operation."); |
| // Handle only loads/stores with base register followed by immediate offset. |
| if (LdSt.getNumExplicitOperands() == 3) { |
| // Non-paired instruction (e.g., ldr x1, [x0, #8]). |
| if ((!LdSt.getOperand(1).isReg() && !LdSt.getOperand(1).isFI()) || |
| !LdSt.getOperand(2).isImm()) |
| return false; |
| } else if (LdSt.getNumExplicitOperands() == 4) { |
| // Paired instruction (e.g., ldp x1, x2, [x0, #8]). |
| if (!LdSt.getOperand(1).isReg() || |
| (!LdSt.getOperand(2).isReg() && !LdSt.getOperand(2).isFI()) || |
| !LdSt.getOperand(3).isImm()) |
| return false; |
| } else |
| return false; |
| |
| // Get the scaling factor for the instruction and set the width for the |
| // instruction. |
| TypeSize Scale(0U, false); |
| int64_t Dummy1, Dummy2; |
| |
| // If this returns false, then it's an instruction we don't want to handle. |
| if (!getMemOpInfo(LdSt.getOpcode(), Scale, Width, Dummy1, Dummy2)) |
| return false; |
| |
| // Compute the offset. Offset is calculated as the immediate operand |
| // multiplied by the scaling factor. Unscaled instructions have scaling factor |
| // set to 1. |
| if (LdSt.getNumExplicitOperands() == 3) { |
| BaseOp = &LdSt.getOperand(1); |
| Offset = LdSt.getOperand(2).getImm() * Scale.getKnownMinSize(); |
| } else { |
| assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands"); |
| BaseOp = &LdSt.getOperand(2); |
| Offset = LdSt.getOperand(3).getImm() * Scale.getKnownMinSize(); |
| } |
| OffsetIsScalable = Scale.isScalable(); |
| |
| if (!BaseOp->isReg() && !BaseOp->isFI()) |
| return false; |
| |
| return true; |
| } |
| |
| MachineOperand & |
| AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const { |
| assert(LdSt.mayLoadOrStore() && "Expected a memory operation."); |
| MachineOperand &OfsOp = LdSt.getOperand(LdSt.getNumExplicitOperands() - 1); |
| assert(OfsOp.isImm() && "Offset operand wasn't immediate."); |
| return OfsOp; |
| } |
| |
| bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale, |
| unsigned &Width, int64_t &MinOffset, |
| int64_t &MaxOffset) { |
| const unsigned SVEMaxBytesPerVector = AArch64::SVEMaxBitsPerVector / 8; |
| switch (Opcode) { |
| // Not a memory operation or something we want to handle. |
| default: |
| Scale = TypeSize::Fixed(0); |
| Width = 0; |
| MinOffset = MaxOffset = 0; |
| return false; |
| case AArch64::STRWpost: |
| case AArch64::LDRWpost: |
| Width = 32; |
| Scale = TypeSize::Fixed(4); |
| MinOffset = -256; |
| MaxOffset = 255; |
| break; |
| case AArch64::LDURQi: |
| case AArch64::STURQi: |
| Width = 16; |
| Scale = TypeSize::Fixed(1); |
| MinOffset = -256; |
| MaxOffset = 255; |
| break; |
| case AArch64::PRFUMi: |
| case AArch64::LDURXi: |
| case AArch64::LDURDi: |
| case AArch64::STURXi: |
| case AArch64::STURDi: |
| Width = 8; |
| Scale = TypeSize::Fixed(1); |
| MinOffset = -256; |
| MaxOffset = 255; |
| break; |
| case AArch64::LDURWi: |
| case AArch64::LDURSi: |
| case AArch64::LDURSWi: |
|