//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "riscv-isel"
using namespace llvm;
using namespace MIPatternMatch;
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
namespace {
class RISCVInstructionSelector : public InstructionSelector {
public:
RISCVInstructionSelector(const RISCVTargetMachine &TM,
const RISCVSubtarget &STI,
const RISCVRegisterBankInfo &RBI);
bool select(MachineInstr &MI) override;
void setupMF(MachineFunction &MF, GISelValueTracking *VT,
CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) override {
InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
MRI = &MF.getRegInfo();
}
static const char *getName() { return DEBUG_TYPE; }
private:
const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
static constexpr unsigned MaxRecursionDepth = 6;
bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
const unsigned Depth = 0) const;
bool hasAllHUsers(const MachineInstr &MI) const {
return hasAllNBitUsers(MI, 16);
}
bool hasAllWUsers(const MachineInstr &MI) const {
return hasAllNBitUsers(MI, 32);
}
bool isRegInGprb(Register Reg) const;
bool isRegInFprb(Register Reg) const;
// tblgen-erated 'select' implementation, used as the initial selector for
// the patterns that don't require complex C++.
bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
// A lowering phase that runs before any selection attempts, rewriting
// instructions in place where needed.
void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
// Custom selection methods
bool selectCopy(MachineInstr &MI) const;
bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
bool IsExternWeak = false) const;
bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
MachineIRBuilder &MIB) const;
bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
ComplexRendererFns selectShiftMask(MachineOperand &Root,
unsigned ShiftWidth) const;
ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
return selectShiftMask(Root, STI.getXLen());
}
ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
return selectShiftMask(Root, 32);
}
ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
template <unsigned Bits>
ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
return selectSExtBits(Root, Bits);
}
ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
template <unsigned Bits>
ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
return selectZExtBits(Root, Bits);
}
ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
template <unsigned ShAmt>
ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
return selectSHXADDOp(Root, ShAmt);
}
ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
unsigned ShAmt) const;
template <unsigned ShAmt>
ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
return selectSHXADD_UWOp(Root, ShAmt);
}
ComplexRendererFns renderVLOp(MachineOperand &Root) const;
// Custom renderers for tablegen
void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
const MachineInstr &MI, int OpIdx) const;
void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
int OpIdx) const;
const RISCVSubtarget &STI;
const RISCVInstrInfo &TII;
const RISCVRegisterInfo &TRI;
const RISCVRegisterBankInfo &RBI;
const RISCVTargetMachine &TM;
MachineRegisterInfo *MRI = nullptr;
// FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
// uses "STI." in the code generated by TableGen. We need to unify the name of
// Subtarget variable.
const RISCVSubtarget *Subtarget = &STI;
#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace
#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
RISCVInstructionSelector::RISCVInstructionSelector(
const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
const RISCVRegisterBankInfo &RBI)
: STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
TM(TM),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
// Mimics optimizations in SelectionDAG ISel and the RISCVOptWInstrs pass.
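// For example, if every user of a G_ADD result is an ADDW, only the low 32
// bits of that result are observed, so hasAllWUsers returns true and the
// operation can safely be narrowed to a W-form instruction.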
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
unsigned Bits,
const unsigned Depth) const {
assert((MI.getOpcode() == TargetOpcode::G_ADD ||
MI.getOpcode() == TargetOpcode::G_SUB ||
MI.getOpcode() == TargetOpcode::G_MUL ||
MI.getOpcode() == TargetOpcode::G_SHL ||
MI.getOpcode() == TargetOpcode::G_LSHR ||
MI.getOpcode() == TargetOpcode::G_AND ||
MI.getOpcode() == TargetOpcode::G_OR ||
MI.getOpcode() == TargetOpcode::G_XOR ||
MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
"Unexpected opcode");
if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
return false;
auto DestReg = MI.getOperand(0).getReg();
for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
assert(UserOp.getParent() && "UserOp must have a parent");
const MachineInstr &UserMI = *UserOp.getParent();
unsigned OpIdx = UserOp.getOperandNo();
switch (UserMI.getOpcode()) {
default:
return false;
case RISCV::ADDW:
case RISCV::ADDIW:
case RISCV::SUBW:
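// W instructions only read the lower 32 bits of their register operands.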
if (Bits >= 32)
break;
return false;
case RISCV::SLL:
case RISCV::SRA:
case RISCV::SRL:
// Shift amount operands only use log2(XLen) bits.
if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
break;
return false;
case RISCV::SLLI:
// SLLI only uses the lower (XLen - ShAmt) bits.
if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
break;
return false;
case RISCV::ANDI:
if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
(uint64_t)UserMI.getOperand(2).getImm()))
break;
goto RecCheck;
case RISCV::AND:
case RISCV::OR:
case RISCV::XOR:
RecCheck:
if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
break;
return false;
case RISCV::SRLI: {
unsigned ShAmt = UserMI.getOperand(2).getImm();
// If we are shifting right by less than Bits, and users don't demand any
// bits that were shifted into [Bits-1:0], then we can consider this as an
// N-Bit user.
if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
break;
return false;
}
}
}
return true;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
unsigned ShiftWidth) const {
if (!Root.isReg())
return std::nullopt;
using namespace llvm::MIPatternMatch;
Register ShAmtReg = Root.getReg();
// Peek through zext.
Register ZExtSrcReg;
if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
ShAmtReg = ZExtSrcReg;
APInt AndMask;
Register AndSrcReg;
// Try to combine the following pattern (applicable to all shift
// instructions, including the 32-bit *W forms):
//
// %4:gprb(s64) = G_AND %3, %2
// %5:gprb(s64) = G_LSHR %1, %4(s64)
//
// According to RISC-V's ISA manual, SLL, SRL, and SRA ignore all but the
// lowest log2(XLEN) bits of register rs2. For the pattern above, if the
// lowest log2(XLEN) bits of the G_AND result and its non-constant input are
// the same, the G_AND can be eliminated. Given one G_AND operand holding a
// constant (the and mask), there are two cases in which the G_AND can be
// erased:
//
// 1. the lowest log2(XLEN) bits of the and mask are all set
// 2. the bits cleared by the mask are already known to be zero in the
// register being masked
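// An illustrative example (not from the original source): on RV64, given
// %4:gprb(s64) = G_AND %3, 63
// %5:gprb(s64) = G_LSHR %1, %4(s64)
// the mask 63 covers all six shift-amount bits SRL reads, so %4 can be
// replaced by %3 and the G_AND becomes dead.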
if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
if (ShMask.isSubsetOf(AndMask)) {
ShAmtReg = AndSrcReg;
} else {
// SimplifyDemandedBits may have optimized the mask so try restoring any
// bits that are known zero.
KnownBits Known = VT->getKnownBits(AndSrcReg);
if (ShMask.isSubsetOf(AndMask | Known.Zero))
ShAmtReg = AndSrcReg;
}
}
APInt Imm;
Register Reg;
if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
// If we are shifting by X+N where N == 0 mod Size, then just shift by X
// to avoid the ADD.
ShAmtReg = Reg;
} else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
// If we are shifting by N-X where N == 0 mod Size, then just shift by -X
// to generate a NEG instead of a SUB of a constant.
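// For example, with ShiftWidth == 64 a shift amount of (64 - X) reads the
// same low six bits as (-X), so a SUB/SUBW from X0 suffices.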
ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
return {{[=](MachineInstrBuilder &MIB) {
MachineIRBuilder(*MIB.getInstr())
.buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
MIB.addReg(ShAmtReg);
}}};
}
if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
// If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
// to generate a NOT instead of a SUB of a constant.
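// For example, a shift amount of (63 - X) with ShiftWidth == 64 equals ~X
// in the low six bits, because -1 - X == ~X in two's complement.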
ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
return {{[=](MachineInstrBuilder &MIB) {
MachineIRBuilder(*MIB.getInstr())
.buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
.addImm(-1);
MIB.addReg(ShAmtReg);
}}};
}
}
return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
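// Select an operand known to be sign-extended from Bits bits: strip a
// matching G_SEXT_INREG when present, or accept the register as-is when
// value tracking shows more than (Size - Bits) sign bits.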
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
unsigned Bits) const {
if (!Root.isReg())
return std::nullopt;
Register RootReg = Root.getReg();
MachineInstr *RootDef = MRI->getVRegDef(RootReg);
if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
RootDef->getOperand(2).getImm() == Bits) {
return {
{[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
}
unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
return std::nullopt;
}
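// Select an operand known to be zero-extended from Bits bits: strip an AND
// with the corresponding trailing-ones mask or a G_ZEXT from a Bits-wide
// source, or accept the register as-is when value tracking shows the high
// bits are zero.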
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
unsigned Bits) const {
if (!Root.isReg())
return std::nullopt;
Register RootReg = Root.getReg();
Register RegX;
uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
}
if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
MRI->getType(RegX).getScalarSizeInBits() == Bits)
return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
unsigned ShAmt) const {
using namespace llvm::MIPatternMatch;
if (!Root.isReg())
return std::nullopt;
Register RootReg = Root.getReg();
const unsigned XLen = STI.getXLen();
APInt Mask, C2;
Register RegY;
std::optional<bool> LeftShift;
// (and (shl y, c2), mask)
if (mi_match(RootReg, *MRI,
m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
LeftShift = true;
// (and (lshr y, c2), mask)
else if (mi_match(RootReg, *MRI,
m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
LeftShift = false;
if (LeftShift.has_value()) {
if (*LeftShift)
Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
else
Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());
if (Mask.isShiftedMask()) {
unsigned Leading = XLen - Mask.getActiveBits();
unsigned Trailing = Mask.countr_zero();
// Given (and (shl y, c2), mask), where mask has no leading zeros and c3
// trailing zeros, we can use an SRLI by c3 - c2 followed by a SHXADD.
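// An illustrative RV64 example (not from the original source): with
// ShAmt == 3, (and (shl y, 1), 0xFFFFFFFFFFFFFFF8) has c2 == 1 and c3 == 3,
// so the operand is rewritten to (SRLI y, 2) and the SH3ADD supplies the
// remaining shift by 3.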
if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
return {{[=](MachineInstrBuilder &MIB) {
MachineIRBuilder(*MIB.getInstr())
.buildInstr(RISCV::SRLI, {DstReg}, {RegY})
.addImm(Trailing - C2.getLimitedValue());
MIB.addReg(DstReg);
}}};
}
// Given (and (lshr y, c2), mask), where mask has c2 leading zeros and c3
// trailing zeros, we can use an SRLI by c2 + c3 followed by a SHXADD.
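// Illustrative RV64 example: with ShAmt == 3, (and (lshr y, 2), mask) with
// mask == 0x3FFFFFFFFFFFFFF8 (c2 == 2 leading, c3 == 3 trailing zeros)
// becomes (SRLI y, 5) feeding the SH3ADD.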
if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
return {{[=](MachineInstrBuilder &MIB) {
MachineIRBuilder(*MIB.getInstr())
.buildInstr(RISCV::SRLI, {DstReg}, {RegY})
.addImm(Leading + Trailing);
MIB.addReg(DstReg);
}}};
}
}
}
LeftShift.reset();
// (shl (and y, mask), c2)
if (mi_match(RootReg, *MRI,
m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
m_ICst(C2))))
LeftShift = true;
// (lshr (and y, mask), c2)
else if (mi_match(RootReg, *MRI,
m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
m_ICst(C2))))
LeftShift = false;
if (LeftShift.has_value() && Mask.isShiftedMask()) {
unsigned Leading = XLen - Mask.getActiveBits();
unsigned Trailing = Mask.countr_zero();
// Given (shl (and y, mask), c2), where mask has 32 leading zeros and c3
// trailing zeros: if c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
(Trailing + C2.getLimitedValue()) == ShAmt;
if (!Cond)
// Given (lshr (and y, mask), c2), where mask has 32 leading zeros and c3
// trailing zeros: if c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
(Trailing - C2.getLimitedValue()) == ShAmt;
if (Cond) {
Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
return {{[=](MachineInstrBuilder &MIB) {
MachineIRBuilder(*MIB.getInstr())
.buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
.addImm(Trailing);
MIB.addReg(DstReg);
}}};
}
}
return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
unsigned ShAmt) const {
using namespace llvm::MIPatternMatch;
if (!Root.isReg())
return std::nullopt;
Register RootReg = Root.getReg();
// Given (and (shl x, c2), mask), where mask is a shifted mask with
// 32 - ShAmt leading zeros and c2 trailing zeros, we can use SLLI by
// c2 - ShAmt followed by SHXADD_UW, which supplies the remaining shift by
// ShAmt.
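// Illustrative example (not from the original source): with ShAmt == 2,
// (and (shl x, 4), 0x3FFFFFFF0) is a shifted mask with 30 leading and 4
// trailing zeros; the operand becomes (SLLI x, 2), and SH2ADD_UW
// zero-extends its low 32 bits and shifts them left by 2.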
APInt Mask, C2;
Register RegX;
if (mi_match(
RootReg, *MRI,
m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
m_ICst(Mask))))) {
Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
if (Mask.isShiftedMask()) {
unsigned Leading = Mask.countl_zero();
unsigned Trailing = Mask.countr_zero();
if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
return {{[=](MachineInstrBuilder &MIB) {
MachineIRBuilder(*MIB.getInstr())
.buildInstr(RISCV::SLLI, {DstReg}, {RegX})
.addImm(C2.getLimitedValue() - ShAmt);
MIB.addReg(DstReg);
}}};
}
}
}
return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
assert(Root.isReg() && "Expected operand to be a Register");
MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
auto C = RootDef->getOperand(1).getCImm();
if (C->getValue().isAllOnes())
// If the operand is a G_CONSTANT with an all-ones value, it is larger than
// VLMAX. We convert it to an immediate with value VLMaxSentinel, which is
// recognized specially by the vsetvli insertion pass.
return {{[=](MachineInstrBuilder &MIB) {
MIB.addImm(RISCV::VLMaxSentinel);
}}};
if (isUInt<5>(C->getZExtValue())) {
uint64_t ZExtC = C->getZExtValue();
return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
}
}
return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
if (!Root.isReg())
return std::nullopt;
MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
return {{
[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
}};
}
if (isBaseWithConstantOffset(Root, *MRI)) {
MachineOperand &LHS = RootDef->getOperand(1);
MachineOperand &RHS = RootDef->getOperand(2);
MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
if (isInt<12>(RHSC)) {
if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
return {{
[=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
}};
return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
}
}
// TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
// the combiner?
return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
[=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
/// CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
switch (CC) {
default:
llvm_unreachable("Expected ICMP CmpInst::Predicate.");
case CmpInst::Predicate::ICMP_EQ:
return RISCVCC::COND_EQ;
case CmpInst::Predicate::ICMP_NE:
return RISCVCC::COND_NE;
case CmpInst::Predicate::ICMP_ULT:
return RISCVCC::COND_LTU;
case CmpInst::Predicate::ICMP_SLT:
return RISCVCC::COND_LT;
case CmpInst::Predicate::ICMP_UGE:
return RISCVCC::COND_GEU;
case CmpInst::Predicate::ICMP_SGE:
return RISCVCC::COND_GE;
}
}
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
Register &LHS, Register &RHS,
MachineRegisterInfo &MRI) {
// Try to fold an ICmp. If that fails, use a NE compare with X0.
CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
LHS = CondReg;
RHS = RISCV::X0;
CC = RISCVCC::COND_NE;
return;
}
// We found an ICmp; do some canonicalization.
// Adjust comparisons to use comparison with 0 if possible.
if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
switch (Pred) {
case CmpInst::Predicate::ICMP_SGT:
// Convert X > -1 to X >= 0
if (*Constant == -1) {
CC = RISCVCC::COND_GE;
RHS = RISCV::X0;
return;
}
break;
case CmpInst::Predicate::ICMP_SLT:
// Convert X < 1 to 0 >= X
if (*Constant == 1) {
CC = RISCVCC::COND_GE;
RHS = LHS;
LHS = RISCV::X0;
return;
}
break;
default:
break;
}
}
switch (Pred) {
default:
llvm_unreachable("Expected ICMP CmpInst::Predicate.");
case CmpInst::Predicate::ICMP_EQ:
case CmpInst::Predicate::ICMP_NE:
case CmpInst::Predicate::ICMP_ULT:
case CmpInst::Predicate::ICMP_SLT:
case CmpInst::Predicate::ICMP_UGE:
case CmpInst::Predicate::ICMP_SGE:
// These CCs are supported directly by RISC-V branches.
break;
case CmpInst::Predicate::ICMP_SGT:
case CmpInst::Predicate::ICMP_SLE:
case CmpInst::Predicate::ICMP_UGT:
case CmpInst::Predicate::ICMP_ULE:
// These CCs are not supported directly by RISC-V branches; handle them by
// swapping the predicate direction along with LHS and RHS.
Pred = CmpInst::getSwappedPredicate(Pred);
std::swap(LHS, RHS);
break;
}
CC = getRISCVCCFromICmp(Pred);
}
bool RISCVInstructionSelector::select(MachineInstr &MI) {
MachineIRBuilder MIB(MI);
preISelLower(MI, MIB);
const unsigned Opc = MI.getOpcode();
if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
const Register DefReg = MI.getOperand(0).getReg();
const LLT DefTy = MRI->getType(DefReg);
const RegClassOrRegBank &RegClassOrBank =
MRI->getRegClassOrRegBank(DefReg);
const TargetRegisterClass *DefRC =
dyn_cast<const TargetRegisterClass *>(RegClassOrBank);
if (!DefRC) {
if (!DefTy.isValid()) {
LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
return false;
}
const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
DefRC = getRegClassForTypeOnBank(DefTy, RB);
if (!DefRC) {
LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
return false;
}
}
MI.setDesc(TII.get(TargetOpcode::PHI));
return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}
// Certain non-generic instructions also need some special handling.
if (MI.isCopy())
return selectCopy(MI);
return true;
}
if (selectImpl(MI, *CoverageInfo))
return true;
switch (Opc) {
case TargetOpcode::G_ANYEXT:
case TargetOpcode::G_PTRTOINT:
case TargetOpcode::G_INTTOPTR:
case TargetOpcode::G_TRUNC:
case TargetOpcode::G_FREEZE:
return selectCopy(MI);
case TargetOpcode::G_CONSTANT: {
Register DstReg = MI.getOperand(0).getReg();
int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
if (!materializeImm(DstReg, Imm, MIB))
return false;
MI.eraseFromParent();
return true;
}
case TargetOpcode::G_FCONSTANT: {
// TODO: Use constant pool for complex constants.
Register DstReg = MI.getOperand(0).getReg();
const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
APInt Imm = FPimm.bitcastToAPInt();
unsigned Size = MRI->getType(DstReg).getSizeInBits();
if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
return false;
unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
: Size == 32 ? RISCV::FMV_W_X
: RISCV::FMV_H_X;
auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
if (!FMV.constrainAllUses(TII, TRI, RBI))
return false;
} else {
// s64 on rv32
assert(Size == 64 && !Subtarget->is64Bit() &&
"Unexpected size or subtarget");
if (Imm.isNonNegative() && Imm.isZero()) {
// Optimize +0.0 to use fcvt.d.w
MachineInstrBuilder FCVT =
MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
.addImm(RISCVFPRndMode::RNE);
if (!FCVT.constrainAllUses(TII, TRI, RBI))
return false;
MI.eraseFromParent();
return true;
}
// Split into two pieces and build through the stack.
Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
MIB))
return false;
if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
return false;
MachineInstrBuilder PairF64 = MIB.buildInstr(
RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
if (!PairF64.constrainAllUses(TII, TRI, RBI))
return false;
}
MI.eraseFromParent();
return true;
}
case TargetOpcode::G_GLOBAL_VALUE: {
auto *GV = MI.getOperand(1).getGlobal();
if (GV->isThreadLocal()) {
// TODO: implement this case.
return false;
}
return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
}
case TargetOpcode::G_JUMP_TABLE:
case TargetOpcode::G_CONSTANT_POOL:
return selectAddr(MI, MIB);
case TargetOpcode::G_BRCOND: {
Register LHS, RHS;
RISCVCC::CondCode CC;
getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);
auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
.addMBB(MI.getOperand(1).getMBB());
MI.eraseFromParent();
return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
}
case TargetOpcode::G_BRINDIRECT:
MI.setDesc(TII.get(RISCV::PseudoBRIND));
MI.addOperand(MachineOperand::CreateImm(0));
return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
case TargetOpcode::G_SELECT:
return selectSelect(MI, MIB);
case TargetOpcode::G_FCMP:
return selectFPCompare(MI, MIB);
case TargetOpcode::G_FENCE: {
AtomicOrdering FenceOrdering =
static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
SyncScope::ID FenceSSID =
static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
emitFence(FenceOrdering, FenceSSID, MIB);
MI.eraseFromParent();
return true;
}
case TargetOpcode::G_IMPLICIT_DEF:
return selectImplicitDef(MI, MIB);
case TargetOpcode::G_UNMERGE_VALUES:
return selectUnmergeValues(MI, MIB);
default:
return false;
}
}
bool RISCVInstructionSelector::selectUnmergeValues(
MachineInstr &MI, MachineIRBuilder &MIB) const {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
if (!Subtarget->hasStdExtZfa())
return false;
// Split F64 Src into two s32 parts
if (MI.getNumOperands() != 3)
return false;
Register Src = MI.getOperand(2).getReg();
Register Lo = MI.getOperand(0).getReg();
Register Hi = MI.getOperand(1).getReg();
if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
return false;
MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
return false;
MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
return false;
MI.eraseFromParent();
return true;
}
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
MachineIRBuilder &MIB) {
Register PtrReg = Op.getReg();
assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
const LLT sXLen = LLT::scalar(STI.getXLen());
auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
Op.setReg(PtrToInt.getReg(0));
return select(*PtrToInt);
}
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
MachineIRBuilder &MIB) {
switch (MI.getOpcode()) {
case TargetOpcode::G_PTR_ADD: {
Register DstReg = MI.getOperand(0).getReg();
const LLT sXLen = LLT::scalar(STI.getXLen());
replacePtrWithInt(MI.getOperand(1), MIB);
MI.setDesc(TII.get(TargetOpcode::G_ADD));
MRI->setType(DstReg, sXLen);
break;
}
case TargetOpcode::G_PTRMASK: {
Register DstReg = MI.getOperand(0).getReg();
const LLT sXLen = LLT::scalar(STI.getXLen());
replacePtrWithInt(MI.getOperand(1), MIB);
MI.setDesc(TII.get(TargetOpcode::G_AND));
MRI->setType(DstReg, sXLen);
break;
}
}
}
void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
MIB.addImm(-CstVal);
}
void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
MIB.addImm(STI.getXLen() - CstVal);
}
void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
MIB.addImm(32 - CstVal);
}
void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
MIB.addImm(CstVal + 1);
}
void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
"Expected G_FRAME_INDEX");
MIB.add(MI.getOperand(1));
}
void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
MIB.addImm(llvm::countr_zero(C));
}
void RISCVInstructionSelector::renderXLenSubTrailingOnes(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
}
void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
int64_t Adj = Imm < 0 ? -2048 : 2047;
MIB.addImm(Imm - Adj);
}
void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
const MachineInstr &MI,
int OpIdx) const {
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
MIB.addImm(Imm);
}
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
LLT Ty, const RegisterBank &RB) const {
if (RB.getID() == RISCV::GPRBRegBankID) {
if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
return &RISCV::GPRRegClass;
}
if (RB.getID() == RISCV::FPRBRegBankID) {
if (Ty.getSizeInBits() == 16)
return &RISCV::FPR16RegClass;
if (Ty.getSizeInBits() == 32)
return &RISCV::FPR32RegClass;
if (Ty.getSizeInBits() == 64)
return &RISCV::FPR64RegClass;
}
if (RB.getID() == RISCV::VRBRegBankID) {
if (Ty.getSizeInBits().getKnownMinValue() <= 64)
return &RISCV::VRRegClass;
if (Ty.getSizeInBits().getKnownMinValue() == 128)
return &RISCV::VRM2RegClass;
if (Ty.getSizeInBits().getKnownMinValue() == 256)
return &RISCV::VRM4RegClass;
if (Ty.getSizeInBits().getKnownMinValue() == 512)
return &RISCV::VRM8RegClass;
}
return nullptr;
}
bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}
bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}
bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
Register DstReg = MI.getOperand(0).getReg();
if (DstReg.isPhysical())
return true;
const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
assert(DstRC &&
"Register class not available for LLT, register bank combination");
// No need to constrain SrcReg. It will get constrained when
// we hit another of its uses or its defs.
// Copies do not have constraints.
if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
<< " operand\n");
return false;
}
MI.setDesc(TII.get(RISCV::COPY));
return true;
}
bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
MachineIRBuilder &MIB) const {
assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
const Register DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
assert(DstRC &&
"Register class not available for LLT, register bank combination");
if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
<< " operand\n");
return false;
}
MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
return true;
}
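// Materialize Imm into DstReg via the instruction sequence computed by
// RISCVMatInt. For example (illustrative), a 32-bit constant such as
// 0x12345678 typically materializes as a LUI of the upper bits followed by
// an ADDI/ADDIW of the lower 12 bits.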
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
MachineIRBuilder &MIB) const {
if (Imm == 0) {
MIB.buildCopy(DstReg, Register(RISCV::X0));
RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
return true;
}
RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
unsigned NumInsts = Seq.size();
Register SrcReg = RISCV::X0;
for (unsigned i = 0; i < NumInsts; i++) {
Register TmpReg = i < NumInsts - 1
? MRI->createVirtualRegister(&RISCV::GPRRegClass)
: DstReg;
const RISCVMatInt::Inst &I = Seq[i];
MachineInstr *Result;
switch (I.getOpndKind()) {
case RISCVMatInt::Imm:
// clang-format off
Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
.addImm(I.getImm());
// clang-format on
break;
case RISCVMatInt::RegX0:
Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
{SrcReg, Register(RISCV::X0)});
break;
case RISCVMatInt::RegReg:
Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
break;
case RISCVMatInt::RegImm:
Result =
MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
break;
}
if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
return false;
SrcReg = TmpReg;
}
return true;
}
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
MachineIRBuilder &MIB, bool IsLocal,
bool IsExternWeak) const {
assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
"Unexpected opcode");
const MachineOperand &DispMO = MI.getOperand(1);
Register DefReg = MI.getOperand(0).getReg();
const LLT DefTy = MRI->getType(DefReg);
// When HWASAN is used and tagging of global variables is enabled, they
// should be accessed via the GOT, since the tagged address of a global is
// incompatible with existing code models. This also applies to non-PIC
// mode.
if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
if (IsLocal && !Subtarget->allowTaggedGlobals()) {
// Use PC-relative addressing to access the symbol. This generates the
// pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
// %pcrel_lo(auipc)).
MI.setDesc(TII.get(RISCV::PseudoLLA));
return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}
// Use PC-relative addressing to access the GOT for this symbol, then
// load the address from the GOT. This generates the pattern (PseudoLGA
// sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
// %pcrel_lo(auipc))).
MachineFunction &MF = *MI.getParent()->getParent();
MachineMemOperand *MemOp = MF.getMachineMemOperand(
MachinePointerInfo::getGOT(MF),
MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant,
DefTy, Align(DefTy.getSizeInBits() / 8));
auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
.addDisp(DispMO, 0)
.addMemOperand(MemOp);
if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
return false;
MI.eraseFromParent();
return true;
}
switch (TM.getCodeModel()) {
default: {
reportGISelFailure(*MF, *TPC, *MORE, getName(),
"Unsupported code model for lowering", MI);
return false;
}
case CodeModel::Small: {
// The referenced address must lie within a single 2 GiB range, between
// absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
// (lui %hi(sym)) %lo(sym)).
Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
.addDisp(DispMO, 0, RISCVII::MO_HI);
if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
return false;
auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
.addDisp(DispMO, 0, RISCVII::MO_LO);
if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
return false;
MI.eraseFromParent();
return true;
}
case CodeModel::Medium:
// Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
// relocation needs to reference a label that points to the auipc
// instruction itself, not the global. This cannot be done inside the
// instruction selector.
if (IsExternWeak) {
// An extern weak symbol may be undefined, i.e. have value 0, which may
// not be within 2GiB of PC, so use GOT-indirect addressing to access the
// symbol. This generates the pattern (PseudoLGA sym), which expands to
// (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
MachineFunction &MF = *MI.getParent()->getParent();
MachineMemOperand *MemOp = MF.getMachineMemOperand(
MachinePointerInfo::getGOT(MF),
MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant,
DefTy, Align(DefTy.getSizeInBits() / 8));
auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
.addDisp(DispMO, 0)
.addMemOperand(MemOp);
if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
return false;
MI.eraseFromParent();
return true;
}
// Generate a sequence for accessing addresses within any 2GiB range
// within the address space. This generates the pattern (PseudoLLA sym),
// which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
MI.setDesc(TII.get(RISCV::PseudoLLA));
return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}
return false;
}
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
MachineIRBuilder &MIB) const {
auto &SelectMI = cast<GSelect>(MI);
Register LHS, RHS;
RISCVCC::CondCode CC;
getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);
Register DstReg = SelectMI.getReg(0);
unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
unsigned Size = MRI->getType(DstReg).getSizeInBits();
Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
: RISCV::Select_FPR64_Using_CC_GPR;
}
MachineInstr *Result = MIB.buildInstr(Opc)
.addDef(DstReg)
.addReg(LHS)
.addReg(RHS)
.addImm(CC)
.addReg(SelectMI.getTrueReg())
.addReg(SelectMI.getFalseReg());
MI.eraseFromParent();
return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}
// Convert an FCMP predicate to one of the supported H, F, or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
switch (Pred) {
default:
llvm_unreachable("Unsupported predicate");
case CmpInst::FCMP_OLT:
return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
case CmpInst::FCMP_OLE:
return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
case CmpInst::FCMP_OEQ:
return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
}
}
// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
CmpInst::Predicate &Pred, bool &NeedInvert) {
auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
Pred == CmpInst::FCMP_OEQ;
};
assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
if (isLegalFCmpPredicate(InvPred)) {
Pred = InvPred;
std::swap(LHS, RHS);
return true;
}
InvPred = CmpInst::getInversePredicate(Pred);
NeedInvert = true;
if (isLegalFCmpPredicate(InvPred)) {
Pred = InvPred;
return true;
}
InvPred = CmpInst::getSwappedPredicate(InvPred);
if (isLegalFCmpPredicate(InvPred)) {
Pred = InvPred;
std::swap(LHS, RHS);
return true;
}
return false;
}
// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
MachineIRBuilder &MIB) const {
auto &CmpMI = cast<GFCmp>(MI);
CmpInst::Predicate Pred = CmpMI.getCond();
Register DstReg = CmpMI.getReg(0);
Register LHS = CmpMI.getLHSReg();
Register RHS = CmpMI.getRHSReg();
unsigned Size = MRI->getType(LHS).getSizeInBits();
assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
Register TmpReg = DstReg;
bool NeedInvert = false;
// First try swapping operands or inverting.
if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
if (NeedInvert)
TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
if (!Cmp.constrainAllUses(TII, TRI, RBI))
return false;
} else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
// fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
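// fcmp ueq LHS, RHS => (XORI (OR (FLT LHS, RHS), (FLT RHS, LHS)), 1)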
NeedInvert = Pred == CmpInst::FCMP_UEQ;
auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
{&RISCV::GPRRegClass}, {LHS, RHS});
if (!Cmp1.constrainAllUses(TII, TRI, RBI))
return false;
auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
{&RISCV::GPRRegClass}, {RHS, LHS});
if (!Cmp2.constrainAllUses(TII, TRI, RBI))
return false;
if (NeedInvert)
TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
auto Or =
MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
if (!Or.constrainAllUses(TII, TRI, RBI))
return false;
} else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
// fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
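// fcmp uno LHS, RHS => (XORI (AND (FEQ LHS, LHS), (FEQ RHS, RHS)), 1)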
// FIXME: If LHS and RHS are the same we can use a single FEQ.
NeedInvert = Pred == CmpInst::FCMP_UNO;
auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
{&RISCV::GPRRegClass}, {LHS, LHS});
if (!Cmp1.constrainAllUses(TII, TRI, RBI))
return false;
auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
{&RISCV::GPRRegClass}, {RHS, RHS});
if (!Cmp2.constrainAllUses(TII, TRI, RBI))
return false;
if (NeedInvert)
TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
auto And =
MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
if (!And.constrainAllUses(TII, TRI, RBI))
return false;
} else
llvm_unreachable("Unhandled predicate");
// Emit an XORI to invert the result if needed.
if (NeedInvert) {
auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
if (!Xor.constrainAllUses(TII, TRI, RBI))
return false;
}
MI.eraseFromParent();
return true;
}
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
SyncScope::ID FenceSSID,
MachineIRBuilder &MIB) const {
if (STI.hasStdExtZtso()) {
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
FenceSSID == SyncScope::System) {
// fence rw, rw
MIB.buildInstr(RISCV::FENCE, {}, {})
.addImm(RISCVFenceField::R | RISCVFenceField::W)
.addImm(RISCVFenceField::R | RISCVFenceField::W);
return;
}
// MEMBARRIER is a compiler barrier; it codegens to a no-op.
MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
return;
}
// singlethread fences only synchronize with signal handlers on the same
// thread and thus only need to preserve instruction order, not actually
// enforce memory ordering.
if (FenceSSID == SyncScope::SingleThread) {
MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
return;
}
// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
// Manual: Volume I.
unsigned Pred, Succ;
switch (FenceOrdering) {
default:
llvm_unreachable("Unexpected ordering");
case AtomicOrdering::AcquireRelease:
// fence acq_rel -> fence.tso
MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
return;
case AtomicOrdering::Acquire:
// fence acquire -> fence r, rw
Pred = RISCVFenceField::R;
Succ = RISCVFenceField::R | RISCVFenceField::W;
break;
case AtomicOrdering::Release:
// fence release -> fence rw, w
Pred = RISCVFenceField::R | RISCVFenceField::W;
Succ = RISCVFenceField::W;
break;
case AtomicOrdering::SequentiallyConsistent:
// fence seq_cst -> fence rw, rw
Pred = RISCVFenceField::R | RISCVFenceField::W;
Succ = RISCVFenceField::R | RISCVFenceField::W;
break;
}
MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}
namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
const RISCVSubtarget &Subtarget,
const RISCVRegisterBankInfo &RBI) {
return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm