//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;
#define DEBUG_TYPE "isel"
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;
static cl::opt<bool>
InsertAssertAlign("insert-assert-align", cl::init(true),
cl::desc("Insert the experimental `assertalign` node."),
cl::ReallyHidden);
static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
cl::desc("Generate low-precision inline sequences "
"for some float libcalls"),
cl::location(LimitFloatPrecision), cl::Hidden,
cl::init(0));
static cl::opt<unsigned> SwitchPeelThreshold(
"switch-peel-threshold", cl::Hidden, cl::init(66),
cl::desc("Set the case probability threshold for peeling the case from a "
"switch statement. A value greater than 100 will void this "
"optimization"));
// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
std::optional<CallingConv::ID> CC);
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
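/// For example, on a 32-bit little-endian target an i64 value arrives as two
/// i32 parts and is reassembled with BUILD_PAIR, while an i16 value arriving
/// in a single i32 part is truncated, with an optional AssertZext/AssertSext
/// first recording what is known about the discarded high bits.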
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
std::optional<CallingConv::ID> CC = std::nullopt,
std::optional<ISD::NodeType> AssertOp = std::nullopt) {
// Let the target assemble the parts if it wants to
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
PartVT, ValueVT, CC))
return Val;
if (ValueVT.isVector())
return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
CC);
assert(NumParts > 0 && "No parts to assemble!");
SDValue Val = Parts[0];
if (NumParts > 1) {
// Assemble the value from multiple parts.
if (ValueVT.isInteger()) {
unsigned PartBits = PartVT.getSizeInBits();
unsigned ValueBits = ValueVT.getSizeInBits();
// Assemble the power-of-2 part.
unsigned RoundParts = llvm::bit_floor(NumParts);
unsigned RoundBits = PartBits * RoundParts;
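// For example, with three i32 parts forming an i96 value, RoundParts = 2 and
// RoundBits = 64: the first two parts are assembled into an i64 here, and
// the remaining part is combined in as the odd piece below.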
EVT RoundVT = RoundBits == ValueBits ?
ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
SDValue Lo, Hi;
EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
if (RoundParts > 2) {
Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
PartVT, HalfVT, V);
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
RoundParts / 2, PartVT, HalfVT, V);
} else {
Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
}
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
if (RoundParts < NumParts) {
// Assemble the trailing non-power-of-2 part.
unsigned OddParts = NumParts - RoundParts;
EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
OddVT, V, CC);
// Combine the round and odd parts.
Lo = Val;
if (DAG.getDataLayout().isBigEndian())
std::swap(Lo, Hi);
EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
DAG.getConstant(Lo.getValueSizeInBits(), DL,
TLI.getShiftAmountTy(
TotalVT, DAG.getDataLayout())));
Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
}
} else if (PartVT.isFloatingPoint()) {
// FP split into multiple FP parts (for ppcf128)
assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
"Unexpected split");
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
} else {
// FP split into integer parts (soft fp)
assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
!PartVT.isVector() && "Unexpected split");
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
}
}
// There is now one part, held in Val. Correct it to match ValueVT.
// PartEVT is the type of the register class that holds the value.
// ValueVT is the type of the inline asm operation.
EVT PartEVT = Val.getValueType();
if (PartEVT == ValueVT)
return Val;
if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
ValueVT.bitsLT(PartEVT)) {
// For an FP value in an integer part, we need to truncate to the right
// width first.
PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
}
// Handle types that have the same size.
if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
// Handle types with different sizes.
if (PartEVT.isInteger() && ValueVT.isInteger()) {
if (ValueVT.bitsLT(PartEVT)) {
// For a truncate, see if we have any information to
// indicate whether the truncated bits will always be
// zero or sign-extended.
if (AssertOp)
Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
DAG.getValueType(ValueVT));
return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
}
return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
}
if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
// FP_ROUNDs are always exact here.
if (ValueVT.bitsLT(Val.getValueType()))
return DAG.getNode(
ISD::FP_ROUND, DL, ValueVT, Val,
DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
}
// Handle MMX to a narrower integer type by bitcasting MMX to integer and
// then truncating.
if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
ValueVT.bitsLT(PartEVT)) {
Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
}
report_fatal_error("Unknown mismatch in getCopyFromParts!");
}
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
const Twine &ErrMsg) {
const Instruction *I = dyn_cast_or_null<Instruction>(V);
if (!I)
return Ctx.emitError(ErrMsg);
const char *AsmError = ", possible invalid constraint for vector type";
if (const CallInst *CI = dyn_cast<CallInst>(I))
if (CI->isInlineAsm())
return Ctx.emitError(I, ErrMsg + AsmError);
return Ctx.emitError(I, ErrMsg);
}
/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the vector value they represent. If CallConv is given,
/// the parts were produced for that calling convention, and the target's
/// calling-convention-specific type breakdown is used to reassemble them.
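/// For example, a <4 x i32> value split across two v2i32 registers is
/// reassembled below with CONCAT_VECTORS (the exact breakdown is supplied by
/// the target via getVectorTypeBreakdown*).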
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, const Value *V,
std::optional<CallingConv::ID> CallConv) {
assert(ValueVT.isVector() && "Not a vector value");
assert(NumParts > 0 && "No parts to assemble!");
const bool IsABIRegCopy = CallConv.has_value();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Val = Parts[0];
// Handle a multi-element vector.
if (NumParts > 1) {
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
unsigned NumRegs;
if (IsABIRegCopy) {
NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
*DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
NumIntermediates, RegisterVT);
} else {
NumRegs =
TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
NumIntermediates, RegisterVT);
}
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
assert(RegisterVT.getSizeInBits() ==
Parts[0].getSimpleValueType().getSizeInBits() &&
"Part type sizes don't match!");
// Assemble the parts into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
if (NumIntermediates == NumParts) {
// If the register was not expanded, truncate or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
PartVT, IntermediateVT, V, CallConv);
} else if (NumParts > 0) {
// If the intermediate type was expanded, build the intermediate
// operands from the parts.
assert(NumParts % NumIntermediates == 0 &&
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
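// For example, 4 parts feeding 2 intermediates gives Factor = 2, so each
// intermediate value is assembled from 2 consecutive parts.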
for (unsigned i = 0; i != NumIntermediates; ++i)
Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
PartVT, IntermediateVT, V, CallConv);
}
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
// intermediate operands.
EVT BuiltVectorTy =
IntermediateVT.isVector()
? EVT::getVectorVT(
*DAG.getContext(), IntermediateVT.getScalarType(),
IntermediateVT.getVectorElementCount() * NumParts)
: EVT::getVectorVT(*DAG.getContext(),
IntermediateVT.getScalarType(),
NumIntermediates);
Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
: ISD::BUILD_VECTOR,
DL, BuiltVectorTy, Ops);
}
// There is now one part, held in Val. Correct it to match ValueVT.
EVT PartEVT = Val.getValueType();
if (PartEVT == ValueVT)
return Val;
if (PartEVT.isVector()) {
// Vector/Vector bitcast.
if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
// If the parts vector has more elements than the value vector, then we
// have a vector widening case (e.g. <2 x float> -> <4 x float>).
// Extract the elements we want.
if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
assert((PartEVT.getVectorElementCount().getKnownMinValue() >
ValueVT.getVectorElementCount().getKnownMinValue()) &&
(PartEVT.getVectorElementCount().isScalable() ==
ValueVT.getVectorElementCount().isScalable()) &&
"Cannot narrow, it would be a lossy transformation");
PartEVT =
EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
ValueVT.getVectorElementCount());
Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
DAG.getVectorIdxConstant(0, DL));
if (PartEVT == ValueVT)
return Val;
if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
}
// Promoted vector extract
return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
}
// Trivial bitcast if the types are the same size and the destination
// vector type is legal.
if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
TLI.isTypeLegal(ValueVT))
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
if (ValueVT.getVectorNumElements() != 1) {
// Certain ABIs require that vectors are passed as integers. If the vectors
// are the same size, this is an obvious bitcast.
if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
} else if (ValueVT.bitsLT(PartEVT)) {
const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
// Drop the extra bits.
Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
return DAG.getBitcast(ValueVT, Val);
}
diagnosePossiblyInvalidConstraint(
*DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
return DAG.getUNDEF(ValueVT);
}
// Handle cases such as i8 -> <1 x i1>
EVT ValueSVT = ValueVT.getVectorElementType();
if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
unsigned ValueSize = ValueSVT.getSizeInBits();
if (ValueSize == PartEVT.getSizeInBits()) {
Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
} else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
// It's possible a scalar floating point type gets softened to integer and
// then promoted to a larger integer. If PartEVT is the larger integer
// we need to truncate it and then bitcast to the FP type.
assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
Val = DAG.getBitcast(ValueSVT, Val);
} else {
Val = ValueVT.isFloatingPoint()
? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
: DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
}
}
return DAG.getBuildVector(ValueVT, DL, Val);
}
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V,
std::optional<CallingConv::ID> CallConv);
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
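/// For example, an i64 value on a 32-bit target is split into two i32 parts,
/// while an i1 value placed in an i8 part is widened with ExtendKind (e.g.
/// ISD::ZERO_EXTEND for a zeroext value).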
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
unsigned NumParts, MVT PartVT, const Value *V,
std::optional<CallingConv::ID> CallConv = std::nullopt,
ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
// Let the target split the parts if it wants to
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
CallConv))
return;
EVT ValueVT = Val.getValueType();
// Handle the vector case separately.
if (ValueVT.isVector())
return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
CallConv);
unsigned OrigNumParts = NumParts;
assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
"Copying to an illegal type!");
if (NumParts == 0)
return;
assert(!ValueVT.isVector() && "Vector case handled elsewhere");
EVT PartEVT = PartVT;
if (PartEVT == ValueVT) {
assert(NumParts == 1 && "No-op copy with multiple parts!");
Parts[0] = Val;
return;
}
unsigned PartBits = PartVT.getSizeInBits();
if (NumParts * PartBits > ValueVT.getSizeInBits()) {
// If the parts cover more bits than the value has, promote the value.
if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
assert(NumParts == 1 && "Do not know what to promote to!");
Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
} else {
if (ValueVT.isFloatingPoint()) {
// FP values need to be bitcast, then extended if they are being put
// into a larger container.
ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
}
assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
ValueVT.isInteger() &&
"Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
if (PartVT == MVT::x86mmx)
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
}
} else if (PartBits == ValueVT.getSizeInBits()) {
// Different types of the same size.
assert(NumParts == 1 && PartEVT != ValueVT);
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover fewer bits than the value has, truncate the value.
assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
ValueVT.isInteger() &&
"Unknown mismatch!");
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
if (PartVT == MVT::x86mmx)
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
}
// The value may have changed - recompute ValueVT.
ValueVT = Val.getValueType();
assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
"Failed to tile the value with PartVT!");
if (NumParts == 1) {
if (PartEVT != ValueVT) {
diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
"scalar-to-vector conversion failed");
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
}
Parts[0] = Val;
return;
}
// Expand the value into multiple parts.
if (NumParts & (NumParts - 1)) {
// The number of parts is not a power of 2. Split off and copy the tail.
assert(PartVT.isInteger() && ValueVT.isInteger() &&
"Do not know what to expand to!");
unsigned RoundParts = llvm::bit_floor(NumParts);
unsigned RoundBits = RoundParts * PartBits;
unsigned OddParts = NumParts - RoundParts;
SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));
getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
CallConv);
if (DAG.getDataLayout().isBigEndian())
// The odd parts were reversed by getCopyToParts - unreverse them.
std::reverse(Parts + RoundParts, Parts + NumParts);
NumParts = RoundParts;
ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
}
// The number of parts is a power of 2. Repeatedly bisect the value using
// EXTRACT_ELEMENT.
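// For example, with four i32 parts for an i128 value, the first pass
// extracts the two i64 halves into Parts[0] and Parts[2], and the second
// pass splits each half into the final i32 parts.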
Parts[0] = DAG.getNode(ISD::BITCAST, DL,
EVT::getIntegerVT(*DAG.getContext(),
ValueVT.getSizeInBits()),
Val);
for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
for (unsigned i = 0; i < NumParts; i += StepSize) {
unsigned ThisBits = StepSize * PartBits / 2;
EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
SDValue &Part0 = Parts[i];
SDValue &Part1 = Parts[i+StepSize/2];
Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
if (ThisBits == PartBits && ThisVT != PartVT) {
Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
}
}
}
if (DAG.getDataLayout().isBigEndian())
std::reverse(Parts, Parts + OrigNumParts);
}
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
const SDLoc &DL, EVT PartVT) {
if (!PartVT.isVector())
return SDValue();
EVT ValueVT = Val.getValueType();
ElementCount PartNumElts = PartVT.getVectorElementCount();
ElementCount ValueNumElts = ValueVT.getVectorElementCount();
// We only support widening vectors with equivalent element types and
// fixed/scalable properties. If a target needs to widen a fixed-length type
// to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
PartNumElts.isScalable() != ValueNumElts.isScalable() ||
PartVT.getVectorElementType() != ValueVT.getVectorElementType())
return SDValue();
// Widening a scalable vector to another scalable vector is done by inserting
// the vector into a larger undef one.
if (PartNumElts.isScalable())
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
Val, DAG.getVectorIdxConstant(0, DL));
EVT ElementVT = PartVT.getVectorElementType();
// Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
// undef elements.
SmallVector<SDValue, 16> Ops;
DAG.ExtractVectorElements(Val, Ops);
SDValue EltUndef = DAG.getUNDEF(ElementVT);
Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
// FIXME: Use CONCAT for 2x -> 4x.
return DAG.getBuildVector(PartVT, DL, Ops);
}
/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
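/// For example, an <8 x i16> value may be split into four v2i16 parts, or a
/// <3 x float> value widened into a single v4f32 part with an undef lane.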
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
SDValue Val, SDValue *Parts, unsigned NumParts,
MVT PartVT, const Value *V,
std::optional<CallingConv::ID> CallConv) {
EVT ValueVT = Val.getValueType();
assert(ValueVT.isVector() && "Not a vector");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const bool IsABIRegCopy = CallConv.has_value();
if (NumParts == 1) {
EVT PartEVT = PartVT;
if (PartEVT == ValueVT) {
// Nothing to do.
} else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
// Bitconvert vector->vector case.
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
Val = Widened;
} else if (PartVT.isVector() &&
PartEVT.getVectorElementType().bitsGE(
ValueVT.getVectorElementType()) &&
PartEVT.getVectorElementCount() ==
ValueVT.getVectorElementCount()) {
// Promoted vector extract
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
} else if (PartEVT.isVector() &&
PartEVT.getVectorElementType() !=
ValueVT.getVectorElementType() &&
TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
TargetLowering::TypeWidenVector) {
// Combination of widening and promotion.
EVT WidenVT =
EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
PartVT.getVectorElementCount());
SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
} else {
// Don't extract an integer from a float vector. This can happen if the
// FP type gets softened to integer and then promoted. The promotion
// prevents it from being picked up by the earlier bitcast case.
if (ValueVT.getVectorElementCount().isScalar() &&
(!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
DAG.getVectorIdxConstant(0, DL));
} else {
uint64_t ValueSize = ValueVT.getFixedSizeInBits();
assert(PartVT.getFixedSizeInBits() > ValueSize &&
"lossy conversion of vector to scalar type");
EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
Val = DAG.getBitcast(IntermediateType, Val);
Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
}
}
assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
Parts[0] = Val;
return;
}
// Handle a multi-element vector.
EVT IntermediateVT;
MVT RegisterVT;
unsigned NumIntermediates;
unsigned NumRegs;
if (IsABIRegCopy) {
NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
*DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
RegisterVT);
} else {
NumRegs =
TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
NumIntermediates, RegisterVT);
}
assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
NumParts = NumRegs; // Silence a compiler warning.
assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
"Mixing scalable and fixed vectors when copying in parts");
std::optional<ElementCount> DestEltCnt;
if (IntermediateVT.isVector())
DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
else
DestEltCnt = ElementCount::getFixed(NumIntermediates);
EVT BuiltVectorTy = EVT::getVectorVT(
*DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);
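// For example, IntermediateVT = v2f32 with NumIntermediates = 2 gives
// BuiltVectorTy = v4f32, as does a scalar IntermediateVT of f32 with
// NumIntermediates = 4.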
if (ValueVT == BuiltVectorTy) {
// Nothing to do.
} else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
// Bitconvert vector->vector case.
Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
} else {
if (BuiltVectorTy.getVectorElementType().bitsGT(
ValueVT.getVectorElementType())) {
// Integer promotion.
ValueVT = EVT::getVectorVT(*DAG.getContext(),
BuiltVectorTy.getVectorElementType(),
ValueVT.getVectorElementCount());
Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
}
if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
Val = Widened;
}
}
assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
// Split the vector into intermediate operands.
SmallVector<SDValue, 8> Ops(NumIntermediates);
for (unsigned i = 0; i != NumIntermediates; ++i) {
if (IntermediateVT.isVector()) {
// This does something sensible for scalable vectors - see the
// definition of EXTRACT_SUBVECTOR for further details.
unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
Ops[i] =
DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
} else {
Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
DAG.getVectorIdxConstant(i, DL));
}
}
// Split the intermediate operands into legal parts.
if (NumParts == NumIntermediates) {
// If the register was not expanded, promote or copy the value,
// as appropriate.
for (unsigned i = 0; i != NumParts; ++i)
getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
} else if (NumParts > 0) {
// If the intermediate type was expanded, split each value into
// legal parts.
assert(NumIntermediates != 0 && "division by zero");
assert(NumParts % NumIntermediates == 0 &&
"Must expand into a divisible number of parts!");
unsigned Factor = NumParts / NumIntermediates;
for (unsigned i = 0; i != NumIntermediates; ++i)
getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
CallConv);
}
}
RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
EVT valuevt, std::optional<CallingConv::ID> CC)
: ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
RegCount(1, regs.size()), CallConv(CC) {}
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
const DataLayout &DL, unsigned Reg, Type *Ty,
std::optional<CallingConv::ID> CC) {
ComputeValueVTs(TLI, DL, Ty, ValueVTs);
CallConv = CC;
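// For example, Ty = i128 with i64 registers yields ValueVTs = {i128},
// RegVTs = {i64}, RegCount = {2}, and Regs = {Reg, Reg + 1}.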
for (EVT ValueVT : ValueVTs) {
unsigned NumRegs =
isABIMangled()
? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
: TLI.getNumRegisters(Context, ValueVT);
MVT RegisterVT =
isABIMangled()
? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
: TLI.getRegisterType(Context, ValueVT);
for (unsigned i = 0; i != NumRegs; ++i)
Regs.push_back(Reg + i);
RegVTs.push_back(RegisterVT);
RegCount.push_back(NumRegs);
Reg += NumRegs;
}
}
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
FunctionLoweringInfo &FuncInfo,
const SDLoc &dl, SDValue &Chain,
SDValue *Glue, const Value *V) const {
// A Value with type {} or [0 x %t] needs no registers.
if (ValueVTs.empty())
return SDValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Assemble the legal parts into the final values.
SmallVector<SDValue, 4> Values(ValueVTs.size());
SmallVector<SDValue, 8> Parts;
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
// Copy the legal parts from the registers.
EVT ValueVT = ValueVTs[Value];
unsigned NumRegs = RegCount[Value];
MVT RegisterVT = isABIMangled()
? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(), *CallConv, RegVTs[Value])
: RegVTs[Value];
Parts.resize(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue P;
if (!Glue) {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
} else {
P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
*Glue = P.getValue(2);
}
Chain = P.getValue(1);
Parts[i] = P;
// If the source register was virtual and if we know something about it,
// add an assert node.
if (!Register::isVirtualRegister(Regs[Part + i]) ||
!RegisterVT.isInteger())
continue;
const FunctionLoweringInfo::LiveOutInfo *LOI =
FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
if (!LOI)
continue;
unsigned RegSize = RegisterVT.getScalarSizeInBits();
unsigned NumSignBits = LOI->NumSignBits;
unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
if (NumZeroBits == RegSize) {
// The current value is zero.
// Express that explicitly, as it makes it easier for
// optimizations to kick in.
Parts[i] = DAG.getConstant(0, dl, RegisterVT);
continue;
}
// FIXME: We capture more information than the dag can represent. For
// now, just use the tightest assertzext/assertsext possible.
bool isSExt;
EVT FromVT(MVT::Other);
if (NumZeroBits) {
FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
isSExt = false;
} else if (NumSignBits > 1) {
FromVT =
EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
isSExt = true;
} else {
continue;
}
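// For example, a 32-bit register with 24 known leading zero bits gets an
// AssertZext from i8; one with 17 known sign bits gets an AssertSext from
// i16.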
// Add an assertion node.
assert(FromVT != MVT::Other);
Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
RegisterVT, P, DAG.getValueType(FromVT));
}
Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
RegisterVT, ValueVT, V, CallConv);
Part += NumRegs;
Parts.clear();
}
return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
const SDLoc &dl, SDValue &Chain, SDValue *Glue,
const Value *V,
ISD::NodeType PreferredExtendType) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
ISD::NodeType ExtendKind = PreferredExtendType;
// Get the list of the value's legal parts.
unsigned NumRegs = Regs.size();
SmallVector<SDValue, 8> Parts(NumRegs);
for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
unsigned NumParts = RegCount[Value];
MVT RegisterVT = isABIMangled()
? TLI.getRegisterTypeForCallingConv(
*DAG.getContext(), *CallConv, RegVTs[Value])
: RegVTs[Value];
if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
ExtendKind = ISD::ZERO_EXTEND;
getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
NumParts, RegisterVT, V, CallConv, ExtendKind);
Part += NumParts;
}
// Copy the parts into the registers.
SmallVector<SDValue, 8> Chains(NumRegs);
for (unsigned i = 0; i != NumRegs; ++i) {
SDValue Part;
if (!Glue) {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
} else {
Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
*Glue = Part.getValue(1);
}
Chains[i] = Part.getValue(0);
}
if (NumRegs == 1 || Glue)
// If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
// flagged to it. That is the CopyToReg nodes and the user are considered
// a single scheduling unit. If we create a TokenFactor and return it as
// chain, then the TokenFactor is both a predecessor (operand) of the
// user as well as a successor (the TF operands are flagged to the user).
// c1, f1 = CopyToReg
// c2, f2 = CopyToReg
// c3 = TokenFactor c1, c2
// ...
// = op c3, ..., f2
Chain = Chains[NumRegs-1];
else
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
unsigned MatchingIdx, const SDLoc &dl,
SelectionDAG &DAG,
std::vector<SDValue> &Ops) const {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
if (HasMatching)
Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
// Put the register class of the virtual registers in the flag word. That
// way, later passes can recompute register class constraints for inline
// assembly as well as normal instructions.
// Don't do this for tied operands that can use the regclass information
// from the def.
const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
}
SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
Ops.push_back(Res);
if (Code == InlineAsm::Kind_Clobber) {
// Clobbers should always have a 1:1 mapping with registers, and may
// reference registers that have illegal (e.g. vector) types. Hence, we
// shouldn't try to apply any sort of splitting logic to them.
assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
"No 1:1 mapping from clobbers to regs?");
Register SP = TLI.getStackPointerRegisterToSaveRestore();
(void)SP;
for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
assert(
(Regs[I] != SP ||
DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
"If we clobbered the stack pointer, MFI should know about it.");
}
return;
}
for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
MVT RegisterVT = RegVTs[Value];
unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
RegisterVT);
for (unsigned i = 0; i != NumRegs; ++i) {
assert(Reg < Regs.size() && "Mismatch in # registers expected");
unsigned TheReg = Regs[Reg++];
Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
}
}
}
SmallVector<std::pair<unsigned, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
unsigned I = 0;
for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
unsigned RegCount = std::get<0>(CountAndVT);
MVT RegisterVT = std::get<1>(CountAndVT);
TypeSize RegisterSize = RegisterVT.getSizeInBits();
for (unsigned E = I + RegCount; I != E; ++I)
OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
}
return OutVec;
}
void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
AssumptionCache *ac,
const TargetLibraryInfo *li) {
AA = aa;
AC = ac;
GFI = gfi;
LibInfo = li;
Context = DAG.getContext();
LPadToCallSiteMap.clear();
SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
*DAG.getMachineFunction().getFunction().getParent());
}
void SelectionDAGBuilder::clear() {
NodeMap.clear();
UnusedArgNodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
PendingConstrainedFP.clear();
PendingConstrainedFPStrict.clear();
CurInst = nullptr;
HasTailCall = false;
SDNodeOrder = LowestSDNodeOrder;
StatepointLowering.clear();
}
void SelectionDAGBuilder::clearDanglingDebugInfo() {
DanglingDebugInfoMap.clear();
}
// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
SDValue Root = DAG.getRoot();
if (Pending.empty())
return Root;
// Add current root to PendingChains, unless we already indirectly
// depend on it.
if (Root.getOpcode() != ISD::EntryToken) {
unsigned i = 0, e = Pending.size();
for (; i != e; ++i) {
assert(Pending[i].getNode()->getNumOperands() > 1);
if (Pending[i].getNode()->getOperand(0) == Root)
break; // Don't add the root if we already indirectly depend on it.
}
if (i == e)
Pending.push_back(Root);
}
if (Pending.size() == 1)
Root = Pending[0];
else
Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
DAG.setRoot(Root);
Pending.clear();
return Root;
}
SDValue SelectionDAGBuilder::getMemoryRoot() {
return updateRoot(PendingLoads);
}
SDValue SelectionDAGBuilder::getRoot() {
// Chain up all pending constrained intrinsics together with all
// pending loads, by simply appending them to PendingLoads and
// then calling getMemoryRoot().
PendingLoads.reserve(PendingLoads.size() +
PendingConstrainedFP.size() +
PendingConstrainedFPStrict.size());
PendingLoads.append(PendingConstrainedFP.begin(),
PendingConstrainedFP.end());
PendingLoads.append(PendingConstrainedFPStrict.begin(),
PendingConstrainedFPStrict.end());
PendingConstrainedFP.clear();
PendingConstrainedFPStrict.clear();
return getMemoryRoot();
}
SDValue SelectionDAGBuilder::getControlRoot() {
// We need to emit pending fpexcept.strict constrained intrinsics,
// so append them to the PendingExports list.
PendingExports.append(PendingConstrainedFPStrict.begin(),
PendingConstrainedFPStrict.end());
PendingConstrainedFPStrict.clear();
return updateRoot(PendingExports);
}
void SelectionDAGBuilder::visit(const Instruction &I) {
// Set up outgoing PHI node register values before emitting the terminator.
if (I.isTerminator()) {
HandlePHINodesInSuccessorBlocks(I.getParent());
}
// Add SDDbgValue nodes for any var locs here. Do so before updating
// SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
It != End; ++It) {
auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
dropDanglingDebugInfo(Var, It->Expr);
if (It->Values.isKillLocation(It->Expr)) {
handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
continue;
}
SmallVector<Value *> Values(It->Values.location_ops());
if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
It->Values.hasArgList()))
addDanglingDebugInfo(It, SDNodeOrder);
}
}
// Increase the SDNodeOrder if dealing with a non-debug instruction.
if (!isa<DbgInfoIntrinsic>(I))
++SDNodeOrder;
CurInst = &I;
// Set inserted listener only if required.
bool NodeInserted = false;
std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
if (PCSectionsMD) {
InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
DAG, [&](SDNode *) { NodeInserted = true; });
}
visit(I.getOpcode(), I);
if (!I.isTerminator() && !HasTailCall &&
!isa<GCStatepointInst>(I)) // statepoints handle their exports internally
CopyToExportRegsIfNeeded(&I);
// Handle metadata.
if (PCSectionsMD) {
auto It = NodeMap.find(&I);
if (It != NodeMap.end()) {
DAG.addPCSections(It->second.getNode(), PCSectionsMD);
} else if (NodeInserted) {
// This should not happen; if it does, don't let it go unnoticed so we can
// fix it. The relevant visit*() function is probably missing a setValue().
errs() << "warning: losing !pcsections metadata ["
<< I.getModule()->getName() << "]\n";
LLVM_DEBUG(I.dump());
assert(false);
}
}
CurInst = nullptr;
}
void SelectionDAGBuilder::visitPHI(const PHINode &) {
llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}
void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
// Note: this doesn't use InstVisitor, because it has to work with
// ConstantExpr's in addition to instructions.
switch (Opcode) {
default: llvm_unreachable("Unknown instruction type encountered!");
// Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
}
}
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
DILocalVariable *Variable,
DebugLoc DL, unsigned Order,
RawLocationWrapper Values,
DIExpression *Expression) {
if (!Values.hasArgList())
return false;
// For variadic dbg_values we will now insert an undef.
// FIXME: We can potentially recover these!
SmallVector<SDDbgOperand, 2> Locs;
for (const Value *V : Values.location_ops()) {
auto *Undef = UndefValue::get(V->getType());
Locs.push_back(SDDbgOperand::fromConst(Undef));
}
SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
/*IsIndirect=*/false, DL, Order,
/*IsVariadic=*/true);
DAG.AddDbgValue(SDV, /*isParameter=*/false);
return true;
}
void SelectionDAGBuilder::addDanglingDebugInfo(const VarLocInfo *VarLoc,
unsigned Order) {
if (!handleDanglingVariadicDebugInfo(
DAG,
const_cast<DILocalVariable *>(DAG.getFunctionVarLocs()
->getVariable(VarLoc->VariableID)
.getVariable()),
VarLoc->DL, Order, VarLoc->Values, VarLoc->Expr)) {
DanglingDebugInfoMap[VarLoc->Values.getVariableLocationOp(0)].emplace_back(
VarLoc, Order);
}
}
void SelectionDAGBuilder::addDanglingDebugInfo(const DbgValueInst *DI,
unsigned Order) {
// We treat variadic dbg_values differently at this stage.
if (!handleDanglingVariadicDebugInfo(
DAG, DI->getVariable(), DI->getDebugLoc(), Order,
DI->getWrappedLocation(), DI->getExpression())) {
// TODO: Dangling debug info will eventually either be resolved or produce
// an Undef DBG_VALUE. However in the resolution case, a gap may appear
// between the original dbg.value location and its resolved DBG_VALUE,
// which we should ideally fill with an extra Undef DBG_VALUE.
assert(DI->getNumVariableLocationOps() == 1 &&
"DbgValueInst without an ArgList should have a single location "
"operand.");
DanglingDebugInfoMap[DI->getValue(0)].emplace_back(DI, Order);
}
}
void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
const DIExpression *Expr) {
auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
DIVariable *DanglingVariable = DDI.getVariable(DAG.getFunctionVarLocs());
DIExpression *DanglingExpr = DDI.getExpression();
if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << printDDI(DDI)
<< "\n");
return true;
}
return false;
};
for (auto &DDIMI : DanglingDebugInfoMap) {
DanglingDebugInfoVector &DDIV = DDIMI.second;
// If debug info is to be dropped, run it through final checks to see
// whether it can be salvaged.
for (auto &DDI : DDIV)
if (isMatchingDbgValue(DDI))
salvageUnresolvedDbgValue(DDI);
erase_if(DDIV, isMatchingDbgValue);
}
}
// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
SDValue Val) {
auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
return;
DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
for (auto &DDI : DDIV) {
DebugLoc DL = DDI.getDebugLoc();
unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
DILocalVariable *Variable = DDI.getVariable(DAG.getFunctionVarLocs());
DIExpression *Expr = DDI.getExpression();
assert(Variable->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
SDDbgValue *SDV;
if (Val.getNode()) {
// FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
// FuncArgumentDbgValue (it would be hoisted to the function entry, and if
// we couldn't resolve it directly when examining the DbgValue intrinsic
// in the first place we should not be more successful here). Unless we
// have some test case that proves this to be correct, we should avoid
// calling EmitFuncArgumentDbgValue here.
if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
FuncArgumentDbgValueKind::Value, Val)) {
LLVM_DEBUG(dbgs() << "Resolve dangling debug info for " << printDDI(DDI)
<< "\n");
LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
// Increase the SDNodeOrder for the DbgValue here to make sure it is
// inserted after the definition of Val when emitting the instructions
// after ISel. An alternative could be to teach
// ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
<< "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
<< ValSDNodeOrder << "\n");
SDV = getDbgValue(Val, Variable, Expr, DL,
std::max(DbgSDNodeOrder, ValSDNodeOrder));
DAG.AddDbgValue(SDV, false);
} else
LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
<< printDDI(DDI) << " in EmitFuncArgumentDbgValue\n");
} else {
LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(DDI) << "\n");
auto Undef = UndefValue::get(V->getType());
auto SDV =
DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
DAG.AddDbgValue(SDV, false);
}
}
DDIV.clear();
}
void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
// TODO: For the variadic implementation, instead of only checking the fail
// state of `handleDebugValue`, we need to know specifically which values were
// invalid, so that we attempt to salvage only those values when processing
// a DIArgList.
Value *V = DDI.getVariableLocationOp(0);
Value *OrigV = V;
DILocalVariable *Var = DDI.getVariable(DAG.getFunctionVarLocs());
DIExpression *Expr = DDI.getExpression();
DebugLoc DL = DDI.getDebugLoc();
unsigned SDOrder = DDI.getSDNodeOrder();
// Currently we consider only dbg.value intrinsics -- we tell the salvager
// that DW_OP_stack_value is desired.
bool StackValue = true;
// Can this Value be encoded without any further work?
if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
return;
// Attempt to salvage back through as many instructions as possible. Bail if
// a non-instruction is seen, such as a constant expression or global
// variable. FIXME: Further work could recover those too.
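// For example, given "%add = add i32 %x, 5", the location can typically be
// rewritten to use %x with "DW_OP_plus_uconst 5, DW_OP_stack_value" appended
// to the expression.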
while (isa<Instruction>(V)) {
Instruction &VAsInst = *cast<Instruction>(V);
// Temporary "0", awaiting real implementation.
SmallVector<uint64_t, 16> Ops;
SmallVector<Value *, 4> AdditionalValues;
V = salvageDebugInfoImpl(VAsInst, Expr->getNumLocationOperands(), Ops,
AdditionalValues);
// If we cannot salvage any further, and haven't yet found a suitable debug
// expression, bail out.
if (!V)
break;
// TODO: If AdditionalValues isn't empty, then the salvage can only be
// represented with a DBG_VALUE_LIST, so we give up. When we have support
// here for variadic dbg_values, remove that condition.
if (!AdditionalValues.empty())
break;
// New value and expr now represent this debuginfo.
Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
// Some kind of simplification occurred: check whether the operand of the
// salvaged debug expression can be encoded in this DAG.
if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
LLVM_DEBUG(
dbgs() << "Salvaged debug location info for:\n " << *Var << "\n"
<< *OrigV << "\nBy stripping back to:\n " << *V << "\n");
return;
}
}
// This was the final opportunity to salvage this debug information, and it
// couldn't be done. Place an undef DBG_VALUE at this location to terminate
// any earlier variable location.
assert(OrigV && "V shouldn't be null");
auto *Undef = UndefValue::get(OrigV->getType());
auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
DAG.AddDbgValue(SDV, false);
LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " << printDDI(DDI)
<< "\n");
}
void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
DIExpression *Expr,
DebugLoc DbgLoc,
unsigned Order) {
Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
DIExpression *NewExpr =
const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
/*IsVariadic*/ false);
}
bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
DILocalVariable *Var,
DIExpression *Expr, DebugLoc DbgLoc,
unsigned Order, bool IsVariadic) {
if (Values.empty())
return true;
SmallVector<SDDbgOperand> LocationOps;
SmallVector<SDNode *> Dependencies;
for (const Value *V : Values) {
// Constant value.
if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
isa<ConstantPointerNull>(V)) {
LocationOps.emplace_back(SDDbgOperand::fromConst(V));
continue;
}
// Look through IntToPtr constants.
if (auto *CE = dyn_cast<ConstantExpr>(V))
if (CE->getOpcode() == Instruction::IntToPtr) {
LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
continue;
}
// If the Value is a frame index, we can create a FrameIndex debug value
// without relying on the DAG at all.
if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
auto SI = FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end()) {
LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
continue;
}
}
// Do not use getValue() in here; we don't want to generate code at
// this point if it hasn't been done yet.
SDValue N = NodeMap[V];
if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
N = UnusedArgNodeMap[V];
if (N.getNode()) {
// Only emit func arg dbg value for non-variadic dbg.values for now.
if (!IsVariadic &&
EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
FuncArgumentDbgValueKind::Value, N))
return true;
if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
// Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
// describe stack slot locations.
//
// Consider "int x = 0; int *px = &x;". There are two kinds of
// interesting debug values here after optimization:
//
// dbg.value(i32* %px, !"int *px", !DIExpression()), and
// dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
//
// Both describe the direct values of their associated variables.
Dependencies.push_back(N.getNode());
LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
continue;
}
LocationOps.emplace_back(
SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
continue;
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Special rules apply for the first dbg.values of parameter variables in a
// function. Identify them by the fact that they reference Argument Values,
// that the variable is a parameter, and that the location is not inlined-at
// (i.e. the parameter belongs to the current function). We
// need to let them dangle until they get an SDNode.
bool IsParamOfFunc =
isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
if (IsParamOfFunc)
return false;
// The value is not used in this block yet (or it would have an SDNode).
// We still want the value to appear for the user if possible -- if it has
// an associated VReg, we can refer to that instead.
auto VMI = FuncInfo.ValueMap.find(V);
if (VMI != FuncInfo.ValueMap.end()) {
unsigned Reg = VMI->second;
// If this is a PHI node, it may be split up into several MI PHI nodes
// (in FunctionLoweringInfo::set).
RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
V->getType(), std::nullopt);
if (RFV.occupiesMultipleRegs()) {
// FIXME: We could potentially support variadic dbg_values here.
if (IsVariadic)
return false;
unsigned Offset = 0;
unsigned BitsToDescribe = 0;
if (auto VarSize = Var->getSizeInBits())
BitsToDescribe = *VarSize;
if (auto Fragment = Expr->getFragmentInfo())
BitsToDescribe = Fragment->SizeInBits;
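// For example, a 64-bit variable living in two 32-bit registers is emitted
// as two DBG_VALUEs with fragment expressions covering bits [0, 32) and
// [32, 64).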
for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
// Bail out if all bits are described already.
if (Offset >= BitsToDescribe)
break;
// TODO: handle scalable vectors.
unsigned RegisterSize = RegAndSize.second;
unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
? BitsToDescribe - Offset
: RegisterSize;
auto FragmentExpr = DIExpression::createFragmentExpression(
Expr, Offset, FragmentSize);
if (!FragmentExpr)
continue;
SDDbgValue *SDV = DAG.getVRegDbgValue(
Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, SDNodeOrder);
DAG.AddDbgValue(SDV, false);
Offset += RegisterSize;
}
return true;
}
// We can use simple vreg locations for variadic dbg_values as well.
LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
continue;
}
// We failed to create a SDDbgOperand for V.
return false;
}
// We have created a SDDbgOperand for each Value in Values.
// Should use Order instead of SDNodeOrder?
assert(!LocationOps.empty());
SDDbgValue *SDV = DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
/*IsIndirect=*/false, DbgLoc,
SDNodeOrder, IsVariadic);
DAG.AddDbgValue(SDV, /*isParameter=*/false);
return true;
}
void SelectionDAGBuilder::resolveOrClearDbgInfo() {
// Try to fixup any remaining dangling debug info -- and drop it if we can't.
for (auto &Pair : DanglingDebugInfoMap)
for (auto &DDI : Pair.second)
salvageUnresolvedDbgValue(DDI);
clearDanglingDebugInfo();
}
/// getCopyFromRegs - If there is a virtual register allocated for the value V,
/// emit CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
SDValue Result;
if (It != FuncInfo.ValueMap.end()) {
Register InReg = It->second;
RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
DAG.getDataLayout(), InReg, Ty,
std::nullopt); // This is not an ABI copy.
SDValue Chain = DAG.getEntryNode();
Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
V);
resolveDanglingDebugInfo(V, Result);
}
return Result;
}
/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
// If we already have an SDValue for this value, use it. It's important
// to do this first, so that we don't create a CopyFromReg if we already
// have a regular SDValue.
SDValue &N = NodeMap[V];
if (N.getNode()) return N;
// If there's a virtual register allocated and initialized for this
// value, use it.
if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
return copyFromReg;
// Otherwise create a new SDValue and remember it.
SDValue Val = getValueImpl(V);
NodeMap[V] = Val;
resolveDanglingDebugInfo(V, Val);
return Val;
}
/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
// If we already have an SDValue for this value, use it.
SDValue &N = NodeMap[V];
if (N.getNode()) {
if (isIntOrFPConstant(N)) {
// Remove the debug location from the node as the node is about to be used
// in a location which may differ from the original debug location. This
// is relevant to Constant and ConstantFP nodes because they can appear
// as constant expressions inside PHI nodes.
N->setDebugLoc(DebugLoc());
}
return N;
}
// Otherwise create a new SDValue and remember it.
SDValue Val = getValueImpl(V);
NodeMap[V] = Val;
resolveDanglingDebugInfo(V, Val);
return Val;
}
/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (const Constant *C = dyn_cast<Constant>(V)) {
EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
return DAG.getConstant(*CI, getCurSDLoc(), VT);
if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
if (isa<ConstantPointerNull>(C)) {
unsigned AS = V->getType()->getPointerAddressSpace();
return DAG.getConstant(0, getCurSDLoc(),
TLI.getPointerTy(DAG.getDataLayout(), AS));
}
if (match(C, m_VScale()))
return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
return DAG.getUNDEF(VT);
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
visit(CE->getOpcode(), *CE);
SDValue N1 = NodeMap[V];
assert(N1.getNode() && "visit didn't populate the NodeMap!");
return N1;
}
if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
SmallVector<SDValue, 4> Constants;
for (const Use &U : C->operands()) {
SDNode *Val = getValue(U).getNode();
// If the operand is an empty aggregate, there are no values.
if (!Val) continue;
// Add each leaf value from the operand to the Constants list
// to form a flattened list of all the values.
for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
Constants.push_back(SDValue(Val, i));
}
return DAG.getMergeValues(Constants, getCurSDLoc());
}
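// ConstantDataSequential covers both ConstantDataArray and ConstantDataVector;
// e.g., <4 x i32> <i32 1, i32 2, i32 3, i32 4> becomes a single BUILD_VECTOR,
// while a constant data array is flattened via getMergeValues below.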
if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(C)) {
SmallVector<SDValue, 4> Ops;
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
// Add each leaf value from the element to the Ops list
// to form a flattened list of all the values.
for (unsigned j = 0, ee = Val->getNumValues(); j != ee; ++j)
Ops.push_back(SDValue(Val, j));
}
if (isa<ArrayType>(CDS->getType()))
return DAG.getMergeValues(Ops, getCurSDLoc());
return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
}
if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
"Unknown struct or array constant!");
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
unsigned NumElts = ValueVTs.size();
if (NumElts == 0)
return SDValue(); // empty struct
SmallVector<SDValue, 4> Constants(NumElts);
for (unsigned i = 0; i != NumElts; ++i) {
EVT EltVT = ValueVTs[i];
if (isa<UndefValue>(C))
Constants[i] = DAG.getUNDEF(EltVT);
else if (EltVT.isFloatingPoint())
Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
else
Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
}
return DAG.getMergeValues(Constants, getCurSDLoc());
}
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
return DAG.getBlockAddress(BA, VT);
if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
return getValue(Equiv->getGlobalValue());
if (const auto *NC = dyn_cast<NoCFIValue>(C))
return getValue(NC->getGlobalValue());
VectorType *VecTy = cast<VectorType>(V->getType());
// Now that we know the number and type of the elements, get that number of
// elements into the Ops array based on what kind of constant it is.
if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
SmallVector<SDValue, 16> Ops;
unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
for (unsigned i = 0; i != NumElements; ++i)
Ops.push_back(getValue(CV->getOperand(i)));
return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
}
if (isa<ConstantAggregateZero>(C)) {
EVT EltVT =
TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
SDValue Op;
if (EltVT.isFloatingPoint())
Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
else
Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
}
llvm_unreachable("Unknown vector constant");
}
// If this is a static alloca, generate it as the frame index instead of
// recomputing the address.
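// E.g., "%buf = alloca [16 x i8]" in the entry block yields a FrameIndex node
// for the stack slot recorded in FuncInfo.StaticAllocaMap.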
if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI != FuncInfo.StaticAllocaMap.end())
return DAG.getFrameIndex(
SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
}
// If this is an instruction which fast-isel has deferred, select it now.
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
Register InReg = FuncInfo.InitializeRegForValue(Inst);
RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
Inst->getType(), std::nullopt);
SDValue Chain = DAG.getEntryNode();
return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
}
if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
if (const auto *BB = dyn_cast<BasicBlock>(V))
return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
llvm_unreachable("Can't get register for value!");
}
void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
bool IsSEH = isAsynchronousEHPersonality(Pers);
MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
if (!IsSEH)
CatchPadMBB->setIsEHScopeEntry();
// In MSVC C++ and CoreCLR, catch blocks are funclets and need prologues.
if (IsMSVCCXX || IsCoreCLR)
CatchPadMBB->setIsEHFuncletEntry();
}
void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
// Update machine-CFG edge.
MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
FuncInfo.MBB->addSuccessor(TargetMBB);
TargetMBB->setIsEHCatchretTarget(true);
DAG.getMachineFunction().setHasEHCatchret(true);
auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
bool IsSEH = isAsynchronousEHPersonality(Pers);
if (IsSEH) {
// If this is not a fall-through branch or optimizations are switched off,
// emit the branch.
if (TargetMBB != NextBlock(FuncInfo.MBB) ||
TM.getOptLevel() == CodeGenOpt::None)
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
getControlRoot(), DAG.getBasicBlock(TargetMBB)));
return;
}
// Figure out the funclet membership for the catchret's successor.
// This will be used by the FuncletLayout pass to determine how to order the
// BBs.
// A 'catchret' returns to the outer scope's color.
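// E.g., when the catchswitch's parent pad is 'none' (the catch is not nested
// in another funclet), the catchret returns to the entry block's color.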
Value *ParentPad = I.getCatchSwitchParentPad();
const BasicBlock *SuccessorColor;
if (isa<ConstantTokenNone>(ParentPad))
SuccessorColor = &FuncInfo.Fn->getEntryBlock();
else
SuccessorColor = cast<Instruction>(ParentPad)->getParent();
assert(SuccessorColor && "No parent funclet for catchret!");
MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
// Create the terminator node.
SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
getControlRoot(), DAG.getBasicBlock(TargetMBB),
DAG.getBasicBlock(SuccessorColorMBB));
DAG.setRoot(Ret);
}
void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
// Don't emit any special code for the cleanuppad instruction. It just marks
// the start of an EH scope/funclet.
FuncInfo.MBB->setIsEHScopeEntry();
auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
if (Pers != EHPersonality::Wasm_CXX) {
FuncInfo.MBB->setIsEHFuncletEntry();
FuncInfo.MBB->setIsCleanupFuncletEntry();
}
}
// In wasm EH, even though a catchpad may not catch an exception if a tag does
// not match, it is OK to add only the first unwind destination catchpad to the
// successors, because there will be at least one invoke instruction within the
// catch scope that points to the next unwind destination, if one exists, so
// CFGSort cannot mess up the BB sorting order.
// (All catchpads with 'catch (type)' clauses have an 'llvm.rethrow' intrinsic
// call within them, and catchpads consisting only of 'catch (...)' have a
// '__cxa_end_catch' call within them, both of which generate invokes in case
// the next unwind destination exists, i.e., the next unwind destination is not
// the caller.)
//
// Having at most one EH pad successor is also simpler and helps later
// transformations.
//
// For example,
// current:
// invoke void @foo to ... unwind label %catch.dispatch
// catch.dispatch:
// %0 = catchswitch within ... [label %catch.start] unwind label %next
// catch.start:
// ...
// ... in this BB or some other child BB dominated by this BB there will be an
// invoke that points to 'next' BB as an unwind destination
//
// next: ; We don't need to add this to 'current' BB's successor list
// ...
static void findWasmUnwindDestinations(
FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
BranchProbability Prob,
SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
&UnwindDests) {
while (EHPadBB) {
const Instruction *Pad = EHPadBB->getFirstNonPHI();
if (isa<CleanupPadInst>(Pad)) {
// Stop on cleanup pads.
UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
UnwindDests.back().first->setIsEHScopeEntry();
break;
} else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
// Add the catchpad handlers to the possible destinations. We don't
// continue to the unwind destination of the catchswitch for wasm.
for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
UnwindDests.back().first->setIsEHScopeEntry();
}
break;
} else {
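// For valid wasm EH IR, the unwind destination's first non-PHI should be a
// cleanuppad or catchswitch, both handled above, so this branch should be
// unreachable; note that 'continue' does not advance EHPadBB.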
continue;
}
}
}
/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine
/// basic block destinations. As those destinations may not be successors of
/// EHPadBB, here we also calculate the edge probability to those destinations.
/// The passed-in Prob is the edge probability to EHPadBB.
static void findUnwindDestinations(
FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
BranchProbability Prob,
SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
&UnwindDests) {
EHPersonality Personality =
classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
bool IsSEH = isAsynchronousEHPersonality(Personality);
if (IsWasmCXX) {
findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
assert(UnwindDests.size() <= 1 &&
"There should be at most one unwind destination for wasm");
return;
}
while (EHPadBB) {
const Instruction *Pad = EHPadBB->getFirstNonPHI();
BasicBlock *NewEHPadBB = nullptr;
if (isa<LandingPadInst>(Pad)) {
// Stop on landingpads. They are not funclets.
UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
break;
} else if (isa<CleanupPadInst>(Pad)) {
// Stop on cleanup pads. Cleanups are always funclet entries for all known
// personalities.
UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
UnwindDests.back().first->setIsEHScopeEntry();
UnwindDests.back().first->setIsEHFuncletEntry();
break;
} else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
// Add the catchpad handlers to the possible destinations.
for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
// For MSVC++ and the CLR, catch blocks are funclets and need prologues.
if (IsMSVCCXX || IsCoreCLR)
UnwindDests.back().first->setIsEHFuncletEntry();
if (!IsSEH)
UnwindDests.back().first->setIsEHScopeEntry();
}
NewEHPadBB = CatchSwitch->getUnwindDest();
} else {
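// An unwind destination's first non-PHI should be a landingpad, cleanuppad,
// or catchswitch, all handled above, so this branch should be unreachable;
// note that 'continue' does not advance EHPadBB.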
continue;
}
BranchProbabilityInfo *BPI = FuncInfo.BPI;
if (BPI && NewEHPadBB)
Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
EHPadBB = NewEHPadBB;
}
}
void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
// Update successor info.
SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
auto UnwindDest = I.getUnwindDest();
BranchProbabilityInfo *BPI = FuncInfo.BPI;
BranchProbability UnwindDestProb =
(BPI && UnwindDest)
? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
: BranchProbability::getZero();
findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
for (auto &UnwindDest : UnwindDests) {
UnwindDest.first->setIsEHPad();
addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
}
FuncInfo.MBB->normalizeSuccProbs();
// Create the terminator node.
SDValue Ret =
DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
DAG.setRoot(Ret);
}
void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
report_fatal_error("visitCatchSwitch not yet implemented!");
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
auto &DL = DAG.getDataLayout();
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
SmallVector<SDValue, 8> OutVals;
// Calls to @llvm.experimental.deoptimize don't generate a return value, so
// lower
//
// %val = call <ty> @llvm.experimental.deoptimize()
// ret <ty> %val
//
// differently.
if (I.getParent()->getTerminatingDeoptimizeCall()) {
LowerDeoptimizingReturn();
return;
}
if (!FuncInfo.CanLowerReturn) {
unsigned DemoteReg = FuncInfo.DemoteRegister;
const Function *F = I.getParent()->getParent();
// Emit a store of the return value through the virtual register.
// Leave Outs empty so that LowerReturn won't try to load return
// registers the usual way.
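// (This is the sret-demotion path: e.g., a return value too large for the
// target's return registers is stored through the hidden pointer held in
// DemoteRegister instead.)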
SmallVector<EVT, 1> PtrValueVTs;
ComputeValueVTs(TLI, DL,
F->getReturnType()->getPointerTo(
DAG.getDataLayout().getAllocaAddrSpace()),
PtrValueVTs);
SDValue RetPtr =
DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
SDValue RetOp = getValue(I.getOperand(0));
SmallVector<EVT, 4> ValueVTs, MemVTs;
SmallVector<uint64_t, 4> Offsets;
ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
&Offsets, 0);
unsigned NumValues = ValueVTs.size();
SmallVector<SDValue, 4> Chains(NumValues);
Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
for (unsigned i = 0; i != NumValues; ++i) {
// An aggregate return value cannot wrap around the address space, so
// offsets to its parts don't wrap either.
SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
TypeSize::Fixed(Offsets[i]));
SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
if (MemVTs[i] != ValueVTs[i])
Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
Chains[i] = DAG.getStore(
Chain, getCurSDLoc(), Val,
// FIXME: better loc info would be nice.
Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
commonAlignment(BaseAlign, Offsets[i]));
}
Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
MVT::Other, Chains);
} else if (I.getNumOperands() != 0) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues) {
SDValue RetOp = getValue(I.getOperand(0));
const Function *F = I.getParent()->getParent();
bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
I.getOperand(0)->getType(), F->getCallingConv(),
/*IsVarArg*/ false, DL);
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (F->getAttributes().hasRetAttr(Attribute::SExt))
ExtendKind = ISD::SIGN_EXTEND;
else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
ExtendKind = ISD::ZERO_EXTEND;
LLVMContext &Context = F->getContext();
bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
for (unsigned j = 0; j != NumValues; ++j) {
EVT VT = ValueVTs[j];
if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
CallingConv::ID CC = F->getCallingConv();
unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
SmallVector<SDValue, 4> Parts(NumParts);
getCopyToParts(DAG, getCurSDLoc(),
SDValue(RetOp.getNode(), RetOp.getResNo() + j),
&Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
// 'inreg' on the function refers to the return value.
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
if (RetInReg)
Flags.setInReg();
if (I.getOperand(0)->getType()->isPointerTy()) {
Flags.setPointer();
Flags.setPointerAddrSpace(
cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
}
if (NeedsRegBlock) {
Flags.setInConsecutiveRegs();
if (j == NumValues - 1)
Flags.setInConsecutiveRegsLast();
}
// Propagate extension type if any
if (ExtendKind == ISD::SIGN_EXTEND)
Flags.setSExt();
else if (ExtendKind == ISD::ZERO_EXTEND)
Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i) {
Outs.push_back(ISD::OutputArg(Flags,
Parts[i].getValueType().getSimpleVT(),
VT, /*isfixed=*/true, 0, 0));
OutVals.push_back(Parts[i]);
}
}
}
}
// Push the swifterror virtual register in as the last element of Outs. This
// makes sure the swifterror virtual register will be returned in the
// swifterror physical register.
const Function *F = I.getParent()->getParent();
if (TLI.supportSwiftError() &&
F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
assert(SwiftError.getFunctionArg() && "Need a swift error argument");
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
Flags.setSwiftError();
Outs.push_back(ISD::OutputArg(
Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
/*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
// Create SDNode for the swifterror virtual register.
OutVals.push_back(
DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
&I, FuncInfo.MBB, SwiftError.getFunctionArg()),
EVT(TLI.getPointerTy(DL))));
}
bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
CallingConv::ID CallConv =
DAG.getMachineFunction().getFunction().getCallingConv();
Chain = DAG.getTargetLoweringInfo().LowerReturn(
Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
// Verify that the target's LowerReturn behaved as expected.
assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
"LowerReturn didn't return a valid chain!");
// Update the DAG with the new chain value resulting from return lowering.
DAG.setRoot(Chain);
}
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
// Skip empty types
if (V->getType()->isEmptyTy())
return;
DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
if (VMI != FuncInfo.ValueMap.end()) {
assert((!V->use_empty() || isa<CallBrInst>(V)) &&
"Unused value assigned virtual registers!");
CopyValueToVirtualRegister(V, VMI->second);
}
}
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
// No need to export constants.
if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
// Already exported?
if (FuncInfo.isExportedInst(V)) return;
Register Reg = FuncInfo.InitializeRegForValue(V);
CopyValueToVirtualRegister(V, Reg);
}
bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
const BasicBlock *FromBB) {
// The operands of the setcc have to be in this block. We don't know
// how to export them from some other block.
if (const Instruction *VI = dyn_cast<Instruction>(V)) {
// Can export from current BB.
if (VI->getParent() == FromBB)
return true;
// Already exported; no-op.
return FuncInfo.isExportedInst(V);
}
// If this is an argument, we can export it if the BB is the entry block or
// if it is already exported.
if (isa<Argument>(V)) {
if (FromBB->isEntryBlock())
return true;
// Otherwise, can only export this if it is already exported.
return FuncInfo.isExportedInst(V);
}
// Otherwise, constants can always be exported.
return true;
}
/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
BranchProbability
SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const {
BranchProbabilityInfo *BPI = FuncInfo.BPI;
const BasicBlock *SrcBB = Src->getBasicBlock();
const BasicBlock *DstBB = Dst->getBasicBlock();
if (!BPI) {
// If BPI is not available, set the default probability to 1 / N, where N is
// the number of successors.
auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
return BranchProbability(1, SuccSize);
}
return BPI->getEdgeProbability(SrcBB, DstBB);
}
void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
MachineBasicBlock *Dst,
BranchProbability Prob) {
if (!FuncInfo.BPI)
Src->addSuccessorWithoutProb(Dst);
else {
if (Prob.isUnknown())
Prob = getEdgeProbability(Src, Dst);
Src->addSuccessor(Dst, Prob);
}
}
static bool InBlock(const Value *V, const BasicBlock *BB) {
if (const Instruction *I = dyn_cast<Instruction>(V))
return I->getParent() == BB;
return true;
}
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
BranchProbability TProb,
BranchProbability FProb,
bool InvertCond) {
const BasicBlock *BB = CurBB->getBasicBlock();
// If the leaf of the tree is a comparison, merge the condition into
// the CaseBlock.
if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
// The operands of the cmp have to be in this block. We don't know
// how to export them from some other block. If this is the first block
// of the sequence, no exporting is needed.
if (CurBB == SwitchBB ||
(isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
ISD::CondCode Condition;
if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
ICmpInst::Predicate Pred =
InvertCond ? IC->getInversePredicate() : IC->getPredicate();
Condition = getICmpCondCode(Pred);
} else {
const FCmpInst *FC = cast<FCmpInst>(Cond);
FCmpInst::Predicate Pred =
InvertCond ? FC->getInversePredicate() : FC->getPredicate();
Condition = getFCmpCondCode(Pred);
if (TM.Options.NoNaNsFPMath)
Condition = getFCmpCodeWithoutNaN(Condition);
}
CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
SL->SwitchCases.push_back(CB);
return;
}
}
// Create a CaseBlock record representing this branch.
ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
SL->SwitchCases.push_back(CB);
}
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
MachineBasicBlock *SwitchBB,
Instruction::BinaryOps Opc,
BranchProbability TProb,
BranchProbability FProb,
bool InvertCond) {
// Skip over nodes that are not part of the tree, and remember to invert the
// op and operands at the next level.
Value *NotCond;
if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
InBlock(NotCond, CurBB->getBasicBlock())) {
FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
!InvertCond);
return;
}
const Instruction *BOp = dyn_cast<Instruction>(Cond);
const Value *BOpOp0, *BOpOp1;
// Compute the effective opcode for Cond, taking into account whether it needs
// to be inverted, e.g.
// and (not (or A, B)), C
// gets lowered as
// and (and (not A, not B), C)
Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
if (BOp) {
BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
? Instruction::And
: (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
? Instruction::Or
: (Instruction::BinaryOps)0);
if (InvertCond) {
if (BOpc == Instruction::And)
BOpc = Instruction::Or;
else if (BOpc == Instruction::Or)
BOpc = Instruction::And;
}
}
// If this node is not part of the or/and tree, emit it as a branch.
// Note that all nodes in the tree should have the same opcode.
bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
!InBlock(BOpOp0, CurBB->getBasicBlock()) ||
!InBlock(BOpOp1, CurBB->getBasicBlock())) {
EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
TProb, FProb, InvertCond);
return;
}
// Create TmpBB after CurBB.
MachineFunction::iterator BBI(CurBB);
MachineFunction &MF = DAG.getMachineFunction();
MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
CurBB->getParent()->insert(++BBI, TmpBB);
if (Opc == Instruction::Or) {
// Codegen X | Y as:
// BB1:
// jmp_if_X TBB
// jmp TmpBB
// TmpBB:
// jmp_if_Y TBB
// jmp FBB
//
// We have flexibility in setting Prob for BB1 and Prob for TmpBB.
// The requirement is that
// TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
// = TrueProb for original BB.
// Assuming the original probabilities are A and B, one choice is to set
// BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
// A/(1+B) and 2B/(1+B). This choice assumes that
// TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
// Another choice is to assume TrueProb for BB1 equals to TrueProb for
// TmpBB, but the math is more complicated.
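// For example, with A = B = 1/2, BB1 gets (TrueProb, FalseProb) = (1/4, 3/4)
// and TmpBB (after the normalization below) gets (1/3, 2/3); indeed
// 1/4 + 3/4 * 1/3 = 1/2 = A.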
auto NewTrueProb = TProb / 2;
auto NewFalseProb = TProb / 2 + FProb;
// Emit the LHS condition.
FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
NewFalseProb, InvertCond);
// Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
// Emit the RHS condition into TmpBB.
FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
Probs[1], InvertCond);
} else {
assert(Opc == Instruction::And && "Unknown merge op!");
// Codegen X & Y as:
// BB1:
// jmp_if_X TmpBB
// jmp FBB
// TmpBB:
// jmp_if_Y TBB
// jmp FBB
//
// This requires creation of TmpBB after CurBB.
// We have flexibility in setting Prob for BB1 and Prob for TmpBB.
// The requirement is that
// FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
// = FalseProb for original BB.
// Assuming the original probabilities are A and B, one choice is to set
// BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
// 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
// TrueProb for BB1 * FalseProb for TmpBB.
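// For example, with A = B = 1/2, BB1 gets (TrueProb, FalseProb) = (3/4, 1/4)
// and TmpBB (after the normalization below) gets (2/3, 1/3); indeed
// 1/4 + 3/4 * 1/3 = 1/2 = B.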
auto NewTrueProb = TProb + FProb / 2;
auto NewFalseProb = FProb / 2;
// Emit the LHS condition.
FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
NewFalseProb, InvertCond);
// Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
// Emit the RHS condition into TmpBB.
FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
Probs[1], InvertCond);
}
}
/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
if (Cases.size() != 2) return true;
// If this is two comparisons of the same values or'd or and'd together, they
// will get folded into a single comparison, so don't emit two blocks.
if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
Cases[0].CmpRHS == Cases[1].CmpRHS) ||
(Cases[0].CmpRHS == Cases[1].CmpLHS &&
Cases[0].CmpLHS == Cases[1].CmpRHS)) {
return false;
}
// Handle: (X != null) | (Y != null) --> (X|Y) != 0
// Handle: (X == null) & (Y == null) --> (X|Y) == 0
if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
Cases[0].CC == Cases[1].CC &&
isa<Constant>(Cases[0].CmpRHS) &&
cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
return false;
if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
return false;
}
return true;
}
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
MachineBasicBlock *BrMBB = FuncInfo.MBB;
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
if (I.isUnconditional()) {
// Update machine-CFG edges.
BrMBB->addSuccessor(Succ0MBB);
// If this is not a fall-through branch or optimizations are switched off,
// emit the branch.
if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
MVT::Other, getControlRoot(),
DAG.getBasicBlock(Succ0MBB)));
return;
}
// If this condition is one of the special cases we handle, lower it now.
const Value *CondVal = I.getCondition();
MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
// If this is a series of conditions that are or'd or and'd together, emit
// this as a sequence of branches instead of setcc's with and/or operations.
// As long as jumps are not expensive (exceptions for multi-use logic ops,
// unpredictable branches, and vector extracts because those jumps are likely
// expensive for any target), this should improve performance.
// For example, instead of something like:
// cmp A, B
// C = seteq
// cmp D, E
// F = setle
// or C, F
// jnz foo
// Emit:
// cmp A, B
// je foo
// cmp D, E
// jle foo
const Instruction *BOp = dyn_cast<Instruction>(CondVal);
if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
Value *Vec;
const Value *BOp0, *BOp1;
Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
Opcode = Instruction::And;
else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
Opcode = Instruction::Or;
if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
getEdgeProbability(BrMBB, Succ0MBB),
getEdgeProbability(BrMBB, Succ1MBB),
/*InvertCond=*/false);
// If the compares in later blocks need to use values not currently
// exported from this block, export them now. This block should always
// be the first entry.
assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
// ShouldEmitAsBranches may still reject the whole set of cases.
if (ShouldEmitAsBranches(SL->SwitchCases)) {
for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
}
// Emit the branch for this block.
visitSwitchCase(SL->SwitchCases[0], BrMBB);
SL->SwitchCases.erase(SL->SwitchCases.begin());
return;
}
// Okay, we decided not to do this; remove any inserted MBBs and clear
// SwitchCases.
for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
SL->SwitchCases.clear();
}
}
// Create a CaseBlock record representing this branch.
CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());