//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}
void SelectionDAG::DAGNodeInsertedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
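
// Illustrative sketch (comment only, not part of the build): IEEE compare on
// doubles treats -0.0 and 0.0 as equal, while the bitwise comparison used
// above distinguishes them.
//   APFloat PosZero(0.0), NegZero(-0.0);
//   // PosZero and NegZero compare equal under IEEE ==, but:
//   assert(!PosZero.bitwiseIsEqual(NegZero) && "distinct bit patterns");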

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
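
// Worked example (illustrative): 0.5 is exactly representable in single
// precision, so converting it to the f32 semantics loses no information and
// isValueValidForType(MVT::f32, APFloat(0.5)) returns true; the double 0.1 is
// not exactly representable as a float, so the analogous query for
// APFloat(0.1) returns false.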

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  if (N->getOpcode() == ISD::SPLAT_VECTOR) {
    unsigned EltSize =
        N->getValueType(0).getVectorElementType().getSizeInBits();
    if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getAPIntValue().trunc(EltSize);
      return true;
    }
    if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getValueAPF().bitcastToAPInt().trunc(EltSize);
      return true;
    }
  }

  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  // Endianness does not matter here. We are checking for a splat given the
  // element size of the vector, and if we find such a splat for little endian
  // layout, then that should be valid also for big endian (as the full vector
  // size is known to be a multiple of the element size).
  const bool IsBigEndian = false;
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize, IsBigEndian) &&
         EltSize == SplatBitSize;
}
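
// Usage sketch (hypothetical caller; `N` is some vector-typed SDNode):
//   APInt SplatVal;
//   if (ISD::isConstantSplatVector(N, SplatVal)) {
//     // SplatVal now holds the splatted element, truncated to the element
//     // width of N's vector type.
//   }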

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countr_one() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countr_one() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}
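
// Worked example (illustrative): after type legalization, a v4i8
// build_vector may carry i32 operands. An i32 operand with value 0x000000FF
// is accepted as "all ones" above because countr_one() == 8 >= EltSize (8),
// even though the i32 constant itself is not ~0.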

bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countr_zero() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countr_zero() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isVectorShrinkable(const SDNode *N, unsigned NewEltSize,
                             bool Signed) {
  assert(N->getValueType(0).isVector() && "Expected a vector!");

  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (EltSize <= NewEltSize)
    return false;

  if (N->getOpcode() == ISD::ZERO_EXTEND) {
    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           !Signed;
  }
  if (N->getOpcode() == ISD::SIGN_EXTEND) {
    return (N->getOperand(0).getValueType().getScalarSizeInBits() <=
            NewEltSize) &&
           Signed;
  }
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;

    APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().trunc(EltSize);
    if (Signed && C.trunc(NewEltSize).sext(EltSize) != C)
      return false;
    if (!Signed && C.trunc(NewEltSize).zext(EltSize) != C)
      return false;
  }

  return true;
}
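
// Worked example (illustrative): a v4i32 build_vector whose elements are all
// 0xFFFFFF80 (-128) is shrinkable to NewEltSize == 8 when Signed, because
// truncating to i8 and sign-extending back reproduces the value. It is not
// shrinkable when !Signed: zero-extending the truncated i8 0x80 gives
// 0x00000080, not 0xFFFFFF80.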

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::isFreezeUndef(const SDNode *N) {
  return N->getOpcode() == ISD::FREEZE && N->getOperand(0).isUndef();
}

template <typename ConstNodeType>
bool ISD::matchUnaryPredicateImpl(SDValue Op,
                                  std::function<bool(ConstNodeType *)> Match,
                                  bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *C = dyn_cast<ConstNodeType>(Op))
    return Match(C);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode() &&
      ISD::SPLAT_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstNodeType>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}
// Build used template types.
template bool ISD::matchUnaryPredicateImpl<ConstantSDNode>(
    SDValue, std::function<bool(ConstantSDNode *)>, bool);
template bool ISD::matchUnaryPredicateImpl<ConstantFPSDNode>(
    SDValue, std::function<bool(ConstantFPSDNode *)>, bool);
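
// Usage sketch (hypothetical caller, going through the
// ISD::matchUnaryPredicate wrapper declared in SelectionDAGNodes.h): test
// whether every constant element of a splat or build_vector is a power of two.
//   bool AllPow2 = ISD::matchUnaryPredicate(V, [](ConstantSDNode *C) {
//     return C->getAPIntValue().isPowerOf2();
//   });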

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (LHS.getOpcode() != RHS.getOpcode() ||
      (LHS.getOpcode() != ISD::BUILD_VECTOR &&
       LHS.getOpcode() != ISD::SPLAT_VECTOR))
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}
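
// Usage sketch (hypothetical caller): check element-wise that LHS < RHS over
// two constant vectors, tolerating undef lanes. With AllowUndefs the callback
// may receive null pointers, so it must guard against them.
//   bool AllLT = ISD::matchBinaryPredicate(
//       LHS, RHS,
//       [](ConstantSDNode *L, ConstantSDNode *R) {
//         return L && R && L->getAPIntValue().ult(R->getAPIntValue());
//       },
//       /*AllowUndefs=*/true);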

ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
  switch (VecReduceOpcode) {
  default:
    llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
    return ISD::FADD;
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_SEQ_FMUL:
  case ISD::VP_REDUCE_FMUL:
  case ISD::VP_REDUCE_SEQ_FMUL:
    return ISD::FMUL;
  case ISD::VECREDUCE_ADD:
  case ISD::VP_REDUCE_ADD:
    return ISD::ADD;
  case ISD::VECREDUCE_MUL:
  case ISD::VP_REDUCE_MUL:
    return ISD::MUL;
  case ISD::VECREDUCE_AND:
  case ISD::VP_REDUCE_AND:
    return ISD::AND;
  case ISD::VECREDUCE_OR:
  case ISD::VP_REDUCE_OR:
    return ISD::OR;
  case ISD::VECREDUCE_XOR:
  case ISD::VP_REDUCE_XOR:
    return ISD::XOR;
  case ISD::VECREDUCE_SMAX:
  case ISD::VP_REDUCE_SMAX:
    return ISD::SMAX;
  case ISD::VECREDUCE_SMIN:
  case ISD::VP_REDUCE_SMIN:
    return ISD::SMIN;
  case ISD::VECREDUCE_UMAX:
  case ISD::VP_REDUCE_UMAX:
    return ISD::UMAX;
  case ISD::VECREDUCE_UMIN:
  case ISD::VP_REDUCE_UMIN:
    return ISD::UMIN;
  case ISD::VECREDUCE_FMAX:
  case ISD::VP_REDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VECREDUCE_FMIN:
  case ISD::VP_REDUCE_FMIN:
    return ISD::FMINNUM;
  case ISD::VECREDUCE_FMAXIMUM:
    return ISD::FMAXIMUM;
  case ISD::VECREDUCE_FMINIMUM:
    return ISD::FMINIMUM;
  }
}

bool ISD::isVPOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...)                                    \
  case ISD::VPSD:                                                              \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
}

bool ISD::isVPBinaryOp(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_BINARYOP return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

bool ISD::isVPReduction(unsigned Opcode) {
  switch (Opcode) {
  default:
    break;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) case ISD::VPSD:
#define VP_PROPERTY_REDUCTION(STARTPOS, ...) return true;
#define END_REGISTER_VP_SDNODE(VPSD) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return false;
}

/// The operand position of the vector mask.
std::optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, ...)         \
  case ISD::VPSD:                                                              \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the explicit vector length parameter.
std::optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)      \
  case ISD::VPSD:                                                              \
    return EVLPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

std::optional<unsigned> ISD::getBaseOpcodeForVP(unsigned VPOpcode,
                                                bool hasFPExcept) {
  // FIXME: Return strict opcodes in case of fp exceptions.
  switch (VPOpcode) {
  default:
    return std::nullopt;
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) case ISD::VPOPC:
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) return ISD::SDOPC;
#define END_REGISTER_VP_SDNODE(VPOPC) break;
#include "llvm/IR/VPIntrinsics.def"
  }
  return std::nullopt;
}

unsigned ISD::getVPForBaseOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("cannot translate this Opcode to VP.");
#define BEGIN_REGISTER_VP_SDNODE(VPOPC, ...) break;
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC) case ISD::SDOPC:
#define END_REGISTER_VP_SDNODE(VPOPC) return ISD::VPOPC;
#include "llvm/IR/VPIntrinsics.def"
  }
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
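
// Worked example (illustrative): condition codes pack E, G, L, U into the low
// bits. ISD::SETLT has only the L bit set; swapping the L and G bits yields
// ISD::SETGT, matching the identity (a < b) == (b > a). Likewise SETULE
// becomes SETUGE, while SETEQ and SETNE are unchanged.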

static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;     // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
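
// Worked example (illustrative): OR-ing the condition bits unions the
// predicates, e.g. getSetCCOrOperation(ISD::SETLT, ISD::SETEQ, <integer VT>)
// yields ISD::SETLE, since (a < b) || (a == b) is exactly (a <= b).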

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC)  {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (const auto &Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::PSEUDO_PROBE:
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    ID.AddInteger(LD->getMemOperand()->getFlags());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    ID.AddInteger(ST->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_LOAD: {
    const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
    ID.AddInteger(ELD->getMemoryVT().getRawBits());
    ID.AddInteger(ELD->getRawSubclassData());
    ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
    ID.AddInteger(ELD->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_STORE: {
    const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
    ID.AddInteger(EST->getMemoryVT().getRawBits());
    ID.AddInteger(EST->getRawSubclassData());
    ID.AddInteger(EST->getPointerInfo().getAddrSpace());
    ID.AddInteger(EST->getMemOperand()->getFlags());
    break;
  }
  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: {
    const VPStridedLoadSDNode *SLD = cast<VPStridedLoadSDNode>(N);
    ID.AddInteger(SLD->getMemoryVT().getRawBits());
    ID.AddInteger(SLD->getRawSubclassData());
    ID.AddInteger(SLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::EXPERIMENTAL_VP_STRIDED_STORE: {
    const VPStridedStoreSDNode *SST = cast<VPStridedStoreSDNode>(N);
    ID.AddInteger(SST->getMemoryVT().getRawBits());
    ID.AddInteger(SST->getRawSubclassData());
    ID.AddInteger(SST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VP_GATHER: {
    const VPGatherSDNode *EG = cast<VPGatherSDNode>(N);
    ID.AddInteger(EG->getMemoryVT().getRawBits());
    ID.AddInteger(EG->getRawSubclassData());
    ID.AddInteger(EG->getPointerInfo().getAddrSpace());
    ID.AddInteger(EG->getMemOperand()->getFlags());
    break;
  }
  case ISD::VP_SCATTER: {
    const VPScatterSDNode *ES = cast<VPScatterSDNode>(N);
    ID.AddInteger(ES->getMemoryVT().getRawBits());
    ID.AddInteger(ES->getRawSubclassData());
    ID.AddInteger(ES->getPointerInfo().getAddrSpace());
    ID.AddInteger(ES->getMemOperand()->getFlags());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    ID.AddInteger(MLD->getMemOperand()->getFlags());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    ID.AddInteger(MST->getMemOperand()->getFlags());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    ID.AddInteger(MG->getMemOperand()->getFlags());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    ID.AddInteger(MS->getMemOperand()->getFlags());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    ID.AddInteger(AT->getMemOperand()->getFlags());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  case ISD::AssertAlign:
    ID.AddInteger(cast<AssertAlignSDNode>(N)->getAlign().value());
    break;
  case ISD::PREFETCH:
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    // Handled by MemIntrinsicSDNode check after the switch.
    break;
  } // end switch (N->getOpcode())

  // MemIntrinsic nodes could also have subclass data, address spaces, and flags
  // to check.
  if (auto *MN = dyn_cast<MemIntrinsicSDNode>(N)) {
    ID.AddInteger(MN->getRawSubclassData());
    ID.AddInteger(MN->getPointerInfo().getAddrSpace());
    ID.AddInteger(MN->getMemOperand()->getFlags());
    ID.AddInteger(MN->getMemoryVT().getRawBits());
  }
}
/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}
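
// Usage sketch (hypothetical, mirroring how the CSE map is queried elsewhere
// in this file): profile a prospective node and look it up before allocating
// a new one.
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, ISD::ADD, VTs, {Op0, Op1});
//   void *IP = nullptr;
//   if (SDNode *Existing = CSEMap.FindNodeOrInsertPos(ID, IP))
//     return SDValue(Existing, 0); // reuse the structurally identical node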

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a glue result.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a glue result.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this one. This
    // could happen if replacing a node causes another node previously added
    // to the worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list.  This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
  assert(!(V->isVariadic() && isParameter));
  if (isParameter)
    ByvalParmDbgValues.push_back(V);
  else
    DbgValues.push_back(V);
  for (const SDNode *Node : V->getSDNodes())
    if (Node)
      DbgValMap[Node].push_back(V);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);

  // Invalidate extra info.
  SDEI.erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (const SDUse &Op : N->ops()) {
      assert((Op.getValueType() == EltVT ||
              (EltVT.isInteger() && Op.getValueType().isInteger() &&
               EltVT.bitsLE(Op.getValueType()))) &&
             "Wrong operand type!");
      assert(Op.getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it.  This is useful when we're about to delete or repurpose
/// the node.  We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // glue result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one.  This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it.  Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified.  If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take.  If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

Align SelectionDAG::getEVTAlign(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ? PointerType::get(*getContext(), 0)
                             : VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlign(Ty);
}
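
// Worked example (illustrative): with a typical 64-bit DataLayout,
// getEVTAlign(MVT::i64) yields Align(8); the MVT::iPTR special case falls
// back to the ABI alignment of a pointer in address space 0, since iPTR has
// no fixed IR type of its own.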

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOptLevel OL)
    : TM(tm), OptLevel(OL), EntryNode(ISD::EntryToken, 0, DebugLoc(),
                                      getVTList(MVT::Other, MVT::Glue)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE, Pass *PassPtr,
                        const TargetLibraryInfo *LibraryInfo,
                        UniformityInfo *NewUA, ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin,
                        FunctionVarLocs const *VarLocs) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  UA = NewUA;
  PSI = PSIin;
  BFI = BFIin;
  FnVarLocs = VarLocs;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
      llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location.  Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDEI.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op,
                       getIntPtrConstant(0, DL, /*isTarget=*/true));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}
 |  | 
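/// Bitcast \p Op to an integer of the same total width if necessary, then
/// any-extend or truncate that integer to the (scalar) type \p VT.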
 | SDValue SelectionDAG::getBitcastedAnyExtOrTrunc(SDValue Op, const SDLoc &DL, | 
 |                                                  EVT VT) { | 
 |   assert(!VT.isVector()); | 
 |   auto Type = Op.getValueType(); | 
 |   SDValue DestOp; | 
 |   if (Type == VT) | 
 |     return Op; | 
 |   auto Size = Op.getValueSizeInBits(); | 
 |   DestOp = getBitcast(MVT::getIntegerVT(Size), Op); | 
 |   if (DestOp.getValueType() == VT) | 
 |     return DestOp; | 
 |  | 
 |   return getAnyExtOrTrunc(DestOp, DL, VT); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getBitcastedSExtOrTrunc(SDValue Op, const SDLoc &DL, | 
 |                                                EVT VT) { | 
 |   assert(!VT.isVector()); | 
 |   auto Type = Op.getValueType(); | 
 |   SDValue DestOp; | 
 |   if (Type == VT) | 
 |     return Op; | 
 |   auto Size = Op.getValueSizeInBits(); | 
 |   DestOp = getBitcast(MVT::getIntegerVT(Size), Op); | 
 |   if (DestOp.getValueType() == VT) | 
 |     return DestOp; | 
 |  | 
 |   return getSExtOrTrunc(DestOp, DL, VT); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getBitcastedZExtOrTrunc(SDValue Op, const SDLoc &DL, | 
 |                                                EVT VT) { | 
 |   assert(!VT.isVector()); | 
 |   auto Type = Op.getValueType(); | 
 |   SDValue DestOp; | 
 |   if (Type == VT) | 
 |     return Op; | 
 |   auto Size = Op.getValueSizeInBits(); | 
 |   DestOp = getBitcast(MVT::getIntegerVT(Size), Op); | 
 |   if (DestOp.getValueType() == VT) | 
 |     return DestOp; | 
 |  | 
 |   return getZExtOrTrunc(DestOp, DL, VT); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, | 
 |                                         EVT OpVT) { | 
 |   if (VT.bitsLE(Op.getValueType())) | 
 |     return getNode(ISD::TRUNCATE, SL, VT, Op); | 
 |  | 
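  // Otherwise extend using whatever matches the target's boolean contents for
  // OpVT: ANY_EXTEND for undefined, ZERO_EXTEND for zero/one, or SIGN_EXTEND
  // for zero/negative-one booleans.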
 |   TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT); | 
 |   return getNode(TLI->getExtendForContent(BType), SL, VT, Op); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { | 
 |   EVT OpVT = Op.getValueType(); | 
 |   assert(VT.isInteger() && OpVT.isInteger() && | 
 |          "Cannot getZeroExtendInReg FP types"); | 
 |   assert(VT.isVector() == OpVT.isVector() && | 
 |          "getZeroExtendInReg type should be vector iff the operand " | 
 |          "type is vector!"); | 
 |   assert((!VT.isVector() || | 
 |           VT.getVectorElementCount() == OpVT.getVectorElementCount()) && | 
 |          "Vector element counts must match in getZeroExtendInReg"); | 
 |   assert(VT.bitsLE(OpVT) && "Not extending!"); | 
 |   if (OpVT == VT) | 
 |     return Op; | 
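  // The AND mask below keeps only VT's low bits; e.g. (illustrative)
  // zero-extending the low i8 of an i32 operand in-register yields
  // (and Op, 0x000000FF).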
 |   APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(), | 
 |                                    VT.getScalarSizeInBits()); | 
 |   return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT)); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) { | 
 |   // Only unsigned pointer semantics are supported right now. In the future this | 
 |   // might delegate to TLI to check pointer signedness. | 
 |   return getZExtOrTrunc(Op, DL, VT); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) { | 
 |   // Only unsigned pointer semantics are supported right now. In the future this | 
 |   // might delegate to TLI to check pointer signedness. | 
 |   return getZeroExtendInReg(Op, DL, VT); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNegative(SDValue Val, const SDLoc &DL, EVT VT) { | 
 |   return getNode(ISD::SUB, DL, VT, getConstant(0, DL, VT), Val); | 
 | } | 
 |  | 
 | /// getNOT - Create a bitwise NOT operation as (XOR Val, -1). | 
 | SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) { | 
 |   return getNode(ISD::XOR, DL, VT, Val, getAllOnesConstant(DL, VT)); | 
 | } | 
 |  | 
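/// Create a logical NOT by XOR'ing Val with the target's "true" value for VT,
/// which is 1 or all-ones depending on the boolean contents.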
 | SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) { | 
 |   SDValue TrueValue = getBoolConstant(true, DL, VT, VT); | 
 |   return getNode(ISD::XOR, DL, VT, Val, TrueValue); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getVPLogicalNOT(const SDLoc &DL, SDValue Val, | 
 |                                       SDValue Mask, SDValue EVL, EVT VT) { | 
 |   SDValue TrueValue = getBoolConstant(true, DL, VT, VT); | 
 |   return getNode(ISD::VP_XOR, DL, VT, Val, TrueValue, Mask, EVL); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, | 
 |                                          SDValue Mask, SDValue EVL) { | 
 |   return getVPZExtOrTrunc(DL, VT, Op, Mask, EVL); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, | 
 |                                        SDValue Mask, SDValue EVL) { | 
 |   if (VT.bitsGT(Op.getValueType())) | 
 |     return getNode(ISD::VP_ZERO_EXTEND, DL, VT, Op, Mask, EVL); | 
 |   if (VT.bitsLT(Op.getValueType())) | 
 |     return getNode(ISD::VP_TRUNCATE, DL, VT, Op, Mask, EVL); | 
 |   return Op; | 
 | } | 
 |  | 
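/// Return the target's encoding of the boolean \p V as a constant of type
/// \p VT: 0 for false, and for true either 1 or all-ones depending on the
/// boolean contents used for \p OpVT.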
 | SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT, | 
 |                                       EVT OpVT) { | 
 |   if (!V) | 
 |     return getConstant(0, DL, VT); | 
 |  | 
 |   switch (TLI->getBooleanContents(OpVT)) { | 
 |   case TargetLowering::ZeroOrOneBooleanContent: | 
 |   case TargetLowering::UndefinedBooleanContent: | 
 |     return getConstant(1, DL, VT); | 
 |   case TargetLowering::ZeroOrNegativeOneBooleanContent: | 
 |     return getAllOnesConstant(DL, VT); | 
 |   } | 
 |   llvm_unreachable("Unexpected boolean content enum!"); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT, | 
 |                                   bool isT, bool isO) { | 
 |   EVT EltVT = VT.getScalarType(); | 
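  // The assert accepts Val when the bits above the element width are all zero
  // or all one, i.e. (roughly) Val is a zero- or sign-extension of an
  // EltVT-sized value: the arithmetic shift then yields 0 or -1, and adding 1
  // gives a value below 2.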
 |   assert((EltVT.getSizeInBits() >= 64 || | 
 |           (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) && | 
 |          "getConstant with a uint64_t value that doesn't fit in the type!"); | 
 |   return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT, | 
 |                                   bool isT, bool isO) { | 
 |   return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL, | 
 |                                   EVT VT, bool isT, bool isO) { | 
 |   assert(VT.isInteger() && "Cannot create FP integer constant!"); | 
 |  | 
 |   EVT EltVT = VT.getScalarType(); | 
 |   const ConstantInt *Elt = &Val; | 
 |  | 
 |   // In some cases the vector type is legal but the element type is illegal and | 
 |   // needs to be promoted, for example v8i8 on ARM.  In this case, promote the | 
 |   // inserted value (the type does not need to match the vector element type). | 
 |   // Any extra bits introduced will be truncated away. | 
 |   if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) == | 
 |                            TargetLowering::TypePromoteInteger) { | 
 |     EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); | 
 |     APInt NewVal; | 
 |     if (TLI->isSExtCheaperThanZExt(VT.getScalarType(), EltVT)) | 
 |       NewVal = Elt->getValue().sextOrTrunc(EltVT.getSizeInBits()); | 
 |     else | 
 |       NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits()); | 
 |     Elt = ConstantInt::get(*getContext(), NewVal); | 
 |   } | 
 |   // In other cases the element type is illegal and needs to be expanded, for | 
 |   // example v2i64 on MIPS32. In this case, find the nearest legal type, split | 
 |   // the value into n parts and use a vector type with n-times the elements. | 
 |   // Then bitcast to the type requested. | 
 |   // Legalizing constants too early makes the DAGCombiner's job harder so we | 
 |   // only legalize if the DAG tells us we must produce legal types. | 
 |   else if (NewNodesMustHaveLegalTypes && VT.isVector() && | 
 |            TLI->getTypeAction(*getContext(), EltVT) == | 
 |                TargetLowering::TypeExpandInteger) { | 
 |     const APInt &NewVal = Elt->getValue(); | 
 |     EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT); | 
 |     unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits(); | 
 |  | 
    // For scalable vectors, or fixed vectors for which SPLAT_VECTOR is legal,
    // try to use a SPLAT_VECTOR_PARTS node.
 |     if (VT.isScalableVector() || | 
 |         TLI->isOperationLegal(ISD::SPLAT_VECTOR, VT)) { | 
 |       assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 && | 
 |              "Can only handle an even split!"); | 
 |       unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits; | 
 |  | 
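      // E.g. (illustrative) an i64 element split via a legal i32 type gives
      // Parts == 2, so the splat is built from two 32-bit scalar constants.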
 |       SmallVector<SDValue, 2> ScalarParts; | 
 |       for (unsigned i = 0; i != Parts; ++i) | 
 |         ScalarParts.push_back(getConstant( | 
 |             NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL, | 
 |             ViaEltVT, isT, isO)); | 
 |  | 
 |       return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts); | 
 |     } | 
 |  | 
 |     unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits; | 
 |     EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts); | 
 |  | 
 |     // Check the temporary vector is the correct size. If this fails then | 
 |     // getTypeToTransformTo() probably returned a type whose size (in bits) | 
 |     // isn't a power-of-2 factor of the requested type size. | 
 |     assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits()); | 
 |  | 
 |     SmallVector<SDValue, 2> EltParts; | 
 |     for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) | 
 |       EltParts.push_back(getConstant( | 
 |           NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL, | 
 |           ViaEltVT, isT, isO)); | 
 |  | 
 |     // EltParts is currently in little endian order. If we actually want | 
 |     // big-endian order then reverse it now. | 
 |     if (getDataLayout().isBigEndian()) | 
 |       std::reverse(EltParts.begin(), EltParts.end()); | 
 |  | 
    // The elements must be reversed when the element order is different
    // from the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.
 |  | 
 |     SmallVector<SDValue, 8> Ops; | 
 |     for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) | 
 |       llvm::append_range(Ops, EltParts); | 
 |  | 
 |     SDValue V = | 
 |         getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops)); | 
 |     return V; | 
 |   } | 
 |  | 
 |   assert(Elt->getBitWidth() == EltVT.getSizeInBits() && | 
 |          "APInt size does not match type size!"); | 
 |   unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(EltVT), std::nullopt); | 
 |   ID.AddPointer(Elt); | 
 |   ID.AddBoolean(isO); | 
 |   void *IP = nullptr; | 
 |   SDNode *N = nullptr; | 
 |   if ((N = FindNodeOrInsertPos(ID, DL, IP))) | 
 |     if (!VT.isVector()) | 
 |       return SDValue(N, 0); | 
 |  | 
 |   if (!N) { | 
 |     N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT); | 
 |     CSEMap.InsertNode(N, IP); | 
 |     InsertNode(N); | 
 |     NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this); | 
 |   } | 
 |  | 
 |   SDValue Result(N, 0); | 
 |   if (VT.isVector()) | 
 |     Result = getSplat(VT, DL, Result); | 
 |   return Result; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL, | 
 |                                         bool isTarget) { | 
 |   return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT, | 
 |                                              const SDLoc &DL, bool LegalTypes) { | 
 |   assert(VT.isInteger() && "Shift amount is not an integer type!"); | 
 |   EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes); | 
 |   return getConstant(Val, DL, ShiftVT); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL, | 
 |                                            bool isTarget) { | 
 |   return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT, | 
 |                                     bool isTarget) { | 
 |   return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL, | 
 |                                     EVT VT, bool isTarget) { | 
 |   assert(VT.isFloatingPoint() && "Cannot create integer FP constant!"); | 
 |  | 
 |   EVT EltVT = VT.getScalarType(); | 
 |  | 
 |   // Do the map lookup using the actual bit pattern for the floating point | 
 |   // value, so that we don't have problems with 0.0 comparing equal to -0.0, and | 
 |   // we don't have issues with SNANs. | 
 |   unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(EltVT), std::nullopt); | 
 |   ID.AddPointer(&V); | 
 |   void *IP = nullptr; | 
 |   SDNode *N = nullptr; | 
 |   if ((N = FindNodeOrInsertPos(ID, DL, IP))) | 
 |     if (!VT.isVector()) | 
 |       return SDValue(N, 0); | 
 |  | 
 |   if (!N) { | 
 |     N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT); | 
 |     CSEMap.InsertNode(N, IP); | 
 |     InsertNode(N); | 
 |   } | 
 |  | 
 |   SDValue Result(N, 0); | 
 |   if (VT.isVector()) | 
 |     Result = getSplat(VT, DL, Result); | 
 |   NewSDValueDbgMsg(Result, "Creating fp constant: ", this); | 
 |   return Result; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT, | 
 |                                     bool isTarget) { | 
 |   EVT EltVT = VT.getScalarType(); | 
 |   if (EltVT == MVT::f32) | 
 |     return getConstantFP(APFloat((float)Val), DL, VT, isTarget); | 
 |   if (EltVT == MVT::f64) | 
 |     return getConstantFP(APFloat(Val), DL, VT, isTarget); | 
 |   if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 || | 
 |       EltVT == MVT::f16 || EltVT == MVT::bf16) { | 
 |     bool Ignored; | 
 |     APFloat APF = APFloat(Val); | 
 |     APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven, | 
 |                 &Ignored); | 
 |     return getConstantFP(APF, DL, VT, isTarget); | 
 |   } | 
 |   llvm_unreachable("Unsupported type in getConstantFP"); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, | 
 |                                        EVT VT, int64_t Offset, bool isTargetGA, | 
 |                                        unsigned TargetFlags) { | 
 |   assert((TargetFlags == 0 || isTargetGA) && | 
 |          "Cannot set target flags on target-independent globals"); | 
 |  | 
 |   // Truncate (with sign-extension) the offset value to the pointer size. | 
 |   unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); | 
 |   if (BitWidth < 64) | 
 |     Offset = SignExtend64(Offset, BitWidth); | 
 |  | 
 |   unsigned Opc; | 
 |   if (GV->isThreadLocal()) | 
 |     Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress; | 
 |   else | 
 |     Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress; | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt); | 
 |   ID.AddPointer(GV); | 
 |   ID.AddInteger(Offset); | 
 |   ID.AddInteger(TargetFlags); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<GlobalAddressSDNode>( | 
 |       Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags); | 
 |   CSEMap.InsertNode(N, IP); | 
  InsertNode(N);
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { | 
 |   unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt); | 
 |   ID.AddInteger(FI); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, | 
 |                                    unsigned TargetFlags) { | 
 |   assert((TargetFlags == 0 || isTarget) && | 
 |          "Cannot set target flags on target-independent jump tables"); | 
 |   unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt); | 
 |   ID.AddInteger(JTI); | 
 |   ID.AddInteger(TargetFlags); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getJumpTableDebugInfo(int JTI, SDValue Chain, | 
 |                                             const SDLoc &DL) { | 
 |   EVT PTy = getTargetLoweringInfo().getPointerTy(getDataLayout()); | 
 |   return getNode(ISD::JUMP_TABLE_DEBUG_INFO, DL, MVT::Glue, Chain, | 
 |                  getTargetConstant(static_cast<uint64_t>(JTI), DL, PTy, true)); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, | 
 |                                       MaybeAlign Alignment, int Offset, | 
 |                                       bool isTarget, unsigned TargetFlags) { | 
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
 |   if (!Alignment) | 
 |     Alignment = shouldOptForSize() | 
 |                     ? getDataLayout().getABITypeAlign(C->getType()) | 
 |                     : getDataLayout().getPrefTypeAlign(C->getType()); | 
 |   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt); | 
 |   ID.AddInteger(Alignment->value()); | 
 |   ID.AddInteger(Offset); | 
 |   ID.AddPointer(C); | 
 |   ID.AddInteger(TargetFlags); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, | 
 |                                           TargetFlags); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V = SDValue(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new constant pool: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, | 
 |                                       MaybeAlign Alignment, int Offset, | 
 |                                       bool isTarget, unsigned TargetFlags) { | 
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent constant pools");
 |   if (!Alignment) | 
 |     Alignment = getDataLayout().getPrefTypeAlign(C->getType()); | 
 |   unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt); | 
 |   ID.AddInteger(Alignment->value()); | 
 |   ID.AddInteger(Offset); | 
 |   C->addSelectionDAGCSEId(ID); | 
 |   ID.AddInteger(TargetFlags); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment, | 
 |                                           TargetFlags); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), std::nullopt); | 
 |   ID.AddPointer(MBB); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<BasicBlockSDNode>(MBB); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getValueType(EVT VT) { | 
 |   if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >= | 
 |       ValueTypeNodes.size()) | 
 |     ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1); | 
 |  | 
 |   SDNode *&N = VT.isExtended() ? | 
 |     ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy]; | 
 |  | 
 |   if (N) return SDValue(N, 0); | 
 |   N = newSDNode<VTSDNode>(VT); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { | 
 |   SDNode *&N = ExternalSymbols[Sym]; | 
 |   if (N) return SDValue(N, 0); | 
 |   N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) { | 
 |   SDNode *&N = MCSymbols[Sym]; | 
 |   if (N) | 
 |     return SDValue(N, 0); | 
 |   N = newSDNode<MCSymbolSDNode>(Sym, VT); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, | 
 |                                               unsigned TargetFlags) { | 
 |   SDNode *&N = | 
 |       TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)]; | 
 |   if (N) return SDValue(N, 0); | 
 |   N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { | 
 |   if ((unsigned)Cond >= CondCodeNodes.size()) | 
 |     CondCodeNodes.resize(Cond+1); | 
 |  | 
 |   if (!CondCodeNodes[Cond]) { | 
 |     auto *N = newSDNode<CondCodeSDNode>(Cond); | 
 |     CondCodeNodes[Cond] = N; | 
 |     InsertNode(N); | 
 |   } | 
 |  | 
 |   return SDValue(CondCodeNodes[Cond], 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getVScale(const SDLoc &DL, EVT VT, APInt MulImm, | 
 |                                 bool ConstantFold) { | 
 |   assert(MulImm.getBitWidth() == VT.getSizeInBits() && | 
 |          "APInt size does not match type size!"); | 
 |  | 
 |   if (MulImm == 0) | 
 |     return getConstant(0, DL, VT); | 
 |  | 
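  // If the function pins vscale to a single value, e.g. via a
  // vscale_range(2,2) attribute, VSCALE folds to a plain constant below.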
 |   if (ConstantFold) { | 
 |     const MachineFunction &MF = getMachineFunction(); | 
 |     const Function &F = MF.getFunction(); | 
 |     ConstantRange CR = getVScaleRange(&F, 64); | 
 |     if (const APInt *C = CR.getSingleElement()) | 
 |       return getConstant(MulImm * C->getZExtValue(), DL, VT); | 
 |   } | 
 |  | 
 |   return getNode(ISD::VSCALE, DL, VT, getConstant(MulImm, DL, VT)); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, | 
 |                                       bool ConstantFold) { | 
 |   if (EC.isScalable()) | 
    return getVScale(DL, VT,
                     APInt(VT.getSizeInBits(), EC.getKnownMinValue()),
                     ConstantFold);
 |  | 
 |   return getConstant(EC.getKnownMinValue(), DL, VT); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) { | 
 |   APInt One(ResVT.getScalarSizeInBits(), 1); | 
 |   return getStepVector(DL, ResVT, One); | 
 | } | 
 |  | 
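/// Build the sequence <0, StepVal, 2 * StepVal, ...>.  E.g. (illustrative)
/// a v4i32 step vector with StepVal == 2 is the BUILD_VECTOR <0, 2, 4, 6>.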
 | SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal) { | 
 |   assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth()); | 
 |   if (ResVT.isScalableVector()) | 
 |     return getNode( | 
 |         ISD::STEP_VECTOR, DL, ResVT, | 
 |         getTargetConstant(StepVal, DL, ResVT.getVectorElementType())); | 
 |  | 
 |   SmallVector<SDValue, 16> OpsStepConstants; | 
 |   for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++) | 
 |     OpsStepConstants.push_back( | 
 |         getConstant(StepVal * i, DL, ResVT.getVectorElementType())); | 
 |   return getBuildVector(ResVT, DL, OpsStepConstants); | 
 | } | 
 |  | 
 | /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that | 
 | /// point at N1 to point at N2 and indices that point at N2 to point at N1. | 
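/// E.g. (illustrative) with 4-element operands, the mask <0, 5, 2, 7> becomes
/// <4, 1, 6, 3> after commuting.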
 | static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) { | 
 |   std::swap(N1, N2); | 
 |   ShuffleVectorSDNode::commuteMask(M); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, | 
 |                                        SDValue N2, ArrayRef<int> Mask) { | 
 |   assert(VT.getVectorNumElements() == Mask.size() && | 
 |          "Must have the same number of vector elements as mask elements!"); | 
 |   assert(VT == N1.getValueType() && VT == N2.getValueType() && | 
 |          "Invalid VECTOR_SHUFFLE"); | 
 |  | 
 |   // Canonicalize shuffle undef, undef -> undef | 
 |   if (N1.isUndef() && N2.isUndef()) | 
 |     return getUNDEF(VT); | 
 |  | 
 |   // Validate that all indices in Mask are within the range of the elements | 
 |   // input to the shuffle. | 
 |   int NElts = Mask.size(); | 
 |   assert(llvm::all_of(Mask, | 
 |                       [&](int M) { return M < (NElts * 2) && M >= -1; }) && | 
 |          "Index out of range"); | 
 |  | 
 |   // Copy the mask so we can do any needed cleanup. | 
 |   SmallVector<int, 8> MaskVec(Mask); | 
 |  | 
 |   // Canonicalize shuffle v, v -> v, undef | 
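  // E.g. (illustrative) with 4 elements, shuffle(v, v, <0, 5, 2, 7>) becomes
  // shuffle(v, undef, <0, 1, 2, 3>).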
 |   if (N1 == N2) { | 
 |     N2 = getUNDEF(VT); | 
 |     for (int i = 0; i != NElts; ++i) | 
 |       if (MaskVec[i] >= NElts) MaskVec[i] -= NElts; | 
 |   } | 
 |  | 
 |   // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask. | 
 |   if (N1.isUndef()) | 
 |     commuteShuffle(N1, N2, MaskVec); | 
 |  | 
 |   if (TLI->hasVectorBlend()) { | 
 |     // If shuffling a splat, try to blend the splat instead. We do this here so | 
 |     // that even when this arises during lowering we don't have to re-handle it. | 
 |     auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) { | 
 |       BitVector UndefElements; | 
 |       SDValue Splat = BV->getSplatValue(&UndefElements); | 
 |       if (!Splat) | 
 |         return; | 
 |  | 
 |       for (int i = 0; i < NElts; ++i) { | 
 |         if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts)) | 
 |           continue; | 
 |  | 
 |         // If this input comes from undef, mark it as such. | 
 |         if (UndefElements[MaskVec[i] - Offset]) { | 
 |           MaskVec[i] = -1; | 
 |           continue; | 
 |         } | 
 |  | 
 |         // If we can blend a non-undef lane, use that instead. | 
 |         if (!UndefElements[i]) | 
 |           MaskVec[i] = i + Offset; | 
 |       } | 
 |     }; | 
 |     if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) | 
 |       BlendSplat(N1BV, 0); | 
 |     if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2)) | 
 |       BlendSplat(N2BV, NElts); | 
 |   } | 
 |  | 
  // Canonicalize "all indices into lhs" -> shuffle lhs, undef
  // Canonicalize "all indices into rhs" -> shuffle rhs, undef
 |   bool AllLHS = true, AllRHS = true; | 
 |   bool N2Undef = N2.isUndef(); | 
 |   for (int i = 0; i != NElts; ++i) { | 
 |     if (MaskVec[i] >= NElts) { | 
 |       if (N2Undef) | 
 |         MaskVec[i] = -1; | 
 |       else | 
 |         AllLHS = false; | 
 |     } else if (MaskVec[i] >= 0) { | 
 |       AllRHS = false; | 
 |     } | 
 |   } | 
 |   if (AllLHS && AllRHS) | 
 |     return getUNDEF(VT); | 
 |   if (AllLHS && !N2Undef) | 
 |     N2 = getUNDEF(VT); | 
 |   if (AllRHS) { | 
 |     N1 = getUNDEF(VT); | 
 |     commuteShuffle(N1, N2, MaskVec); | 
 |   } | 
 |   // Reset our undef status after accounting for the mask. | 
 |   N2Undef = N2.isUndef(); | 
 |   // Re-check whether both sides ended up undef. | 
 |   if (N1.isUndef() && N2Undef) | 
 |     return getUNDEF(VT); | 
 |  | 
  // If this is an identity shuffle, return the lhs as-is.
 |   bool Identity = true, AllSame = true; | 
 |   for (int i = 0; i != NElts; ++i) { | 
 |     if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false; | 
 |     if (MaskVec[i] != MaskVec[0]) AllSame = false; | 
 |   } | 
 |   if (Identity && NElts) | 
 |     return N1; | 
 |  | 
 |   // Shuffling a constant splat doesn't change the result. | 
 |   if (N2Undef) { | 
 |     SDValue V = N1; | 
 |  | 
    // Look through any bitcasts. These are expected not to change the number
    // (and total size) of elements, only their types.
 |     while (V.getOpcode() == ISD::BITCAST) | 
 |       V = V->getOperand(0); | 
 |  | 
 |     // A splat should always show up as a build vector node. | 
 |     if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { | 
 |       BitVector UndefElements; | 
 |       SDValue Splat = BV->getSplatValue(&UndefElements); | 
 |       // If this is a splat of an undef, shuffling it is also undef. | 
 |       if (Splat && Splat.isUndef()) | 
 |         return getUNDEF(VT); | 
 |  | 
 |       bool SameNumElts = | 
 |           V.getValueType().getVectorNumElements() == VT.getVectorNumElements(); | 
 |  | 
 |       // We only have a splat which can skip shuffles if there is a splatted | 
 |       // value and no undef lanes rearranged by the shuffle. | 
 |       if (Splat && UndefElements.none()) { | 
 |         // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the | 
 |         // number of elements match or the value splatted is a zero constant. | 
 |         if (SameNumElts || isNullConstant(Splat)) | 
 |           return N1; | 
 |       } | 
 |  | 
 |       // If the shuffle itself creates a splat, build the vector directly. | 
 |       if (AllSame && SameNumElts) { | 
 |         EVT BuildVT = BV->getValueType(0); | 
 |         const SDValue &Splatted = BV->getOperand(MaskVec[0]); | 
 |         SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted); | 
 |  | 
 |         // We may have jumped through bitcasts, so the type of the | 
 |         // BUILD_VECTOR may not match the type of the shuffle. | 
 |         if (BuildVT != VT) | 
 |           NewBV = getNode(ISD::BITCAST, dl, VT, NewBV); | 
 |         return NewBV; | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   SDValue Ops[2] = { N1, N2 }; | 
 |   AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops); | 
 |   for (int i = 0; i != NElts; ++i) | 
 |     ID.AddInteger(MaskVec[i]); | 
 |  | 
 |   void* IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the OperandAllocator is
  // released.
 |   int *MaskAlloc = OperandAllocator.Allocate<int>(NElts); | 
 |   llvm::copy(MaskVec, MaskAlloc); | 
 |  | 
 |   auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(), | 
 |                                            dl.getDebugLoc(), MaskAlloc); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V = SDValue(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) { | 
 |   EVT VT = SV.getValueType(0); | 
 |   SmallVector<int, 8> MaskVec(SV.getMask()); | 
 |   ShuffleVectorSDNode::commuteMask(MaskVec); | 
 |  | 
 |   SDValue Op0 = SV.getOperand(0); | 
 |   SDValue Op1 = SV.getOperand(1); | 
 |   return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::Register, getVTList(VT), std::nullopt); | 
 |   ID.AddInteger(RegNo); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<RegisterSDNode>(RegNo, VT); | 
 |   N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, UA); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) { | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), std::nullopt); | 
 |   ID.AddPointer(RegMask); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<RegisterMaskSDNode>(RegMask); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root, | 
 |                                  MCSymbol *Label) { | 
 |   return getLabelNode(ISD::EH_LABEL, dl, Root, Label); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl, | 
 |                                    SDValue Root, MCSymbol *Label) { | 
 |   FoldingSetNodeID ID; | 
 |   SDValue Ops[] = { Root }; | 
 |   AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops); | 
 |   ID.AddPointer(Label); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = | 
 |       newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, | 
 |                                       int64_t Offset, bool isTarget, | 
 |                                       unsigned TargetFlags) { | 
 |   unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress; | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opc, getVTList(VT), std::nullopt); | 
 |   ID.AddPointer(BA); | 
 |   ID.AddInteger(Offset); | 
 |   ID.AddInteger(TargetFlags); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getSrcValue(const Value *V) { | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), std::nullopt); | 
 |   ID.AddPointer(V); | 
 |  | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<SrcValueSDNode>(V); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getMDNode(const MDNode *MD) { | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), std::nullopt); | 
 |   ID.AddPointer(MD); | 
 |  | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<MDNodeSDNode>(MD); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) { | 
 |   if (VT == V.getValueType()) | 
 |     return V; | 
 |  | 
 |   return getNode(ISD::BITCAST, SDLoc(V), VT, V); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, | 
 |                                        unsigned SrcAS, unsigned DestAS) { | 
 |   SDValue Ops[] = {Ptr}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops); | 
 |   ID.AddInteger(SrcAS); | 
 |   ID.AddInteger(DestAS); | 
 |  | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(), | 
 |                                            VT, SrcAS, DestAS); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getFreeze(SDValue V) { | 
 |   return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V); | 
 | } | 
 |  | 
 | /// getShiftAmountOperand - Return the specified value casted to | 
 | /// the target's desired shift amount type. | 
 | SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) { | 
 |   EVT OpTy = Op.getValueType(); | 
 |   EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout()); | 
 |   if (OpTy == ShTy || OpTy.isVector()) return Op; | 
 |  | 
 |   return getZExtOrTrunc(Op, SDLoc(Op), ShTy); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::expandVAArg(SDNode *Node) { | 
 |   SDLoc dl(Node); | 
 |   const TargetLowering &TLI = getTargetLoweringInfo(); | 
 |   const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); | 
 |   EVT VT = Node->getValueType(0); | 
 |   SDValue Tmp1 = Node->getOperand(0); | 
 |   SDValue Tmp2 = Node->getOperand(1); | 
 |   const MaybeAlign MA(Node->getConstantOperandVal(3)); | 
 |  | 
 |   SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1, | 
 |                                Tmp2, MachinePointerInfo(V)); | 
 |   SDValue VAList = VAListLoad; | 
 |  | 
 |   if (MA && *MA > TLI.getMinStackArgumentAlignment()) { | 
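    // Round VAList up to the next multiple of MA, i.e. compute
    //   VAList = (VAList + MA - 1) & -MA.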
 |     VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, | 
 |                      getConstant(MA->value() - 1, dl, VAList.getValueType())); | 
 |  | 
 |     VAList = | 
 |         getNode(ISD::AND, dl, VAList.getValueType(), VAList, | 
 |                 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType())); | 
 |   } | 
 |  | 
 |   // Increment the pointer, VAList, to the next vaarg | 
 |   Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList, | 
 |                  getConstant(getDataLayout().getTypeAllocSize( | 
 |                                                VT.getTypeForEVT(*getContext())), | 
 |                              dl, VAList.getValueType())); | 
 |   // Store the incremented VAList to the legalized pointer | 
 |   Tmp1 = | 
 |       getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V)); | 
 |   // Load the actual argument out of the pointer VAList | 
 |   return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo()); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::expandVACopy(SDNode *Node) { | 
 |   SDLoc dl(Node); | 
 |   const TargetLowering &TLI = getTargetLoweringInfo(); | 
 |   // This defaults to loading a pointer from the input and storing it to the | 
 |   // output, returning the chain. | 
 |   const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue(); | 
 |   const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue(); | 
 |   SDValue Tmp1 = | 
 |       getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0), | 
 |               Node->getOperand(2), MachinePointerInfo(VS)); | 
 |   return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), | 
 |                   MachinePointerInfo(VD)); | 
 | } | 
 |  | 
 | Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) { | 
 |   const DataLayout &DL = getDataLayout(); | 
 |   Type *Ty = VT.getTypeForEVT(*getContext()); | 
 |   Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); | 
 |  | 
 |   if (TLI->isTypeLegal(VT) || !VT.isVector()) | 
 |     return RedAlign; | 
 |  | 
 |   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); | 
 |   const Align StackAlign = TFI->getStackAlign(); | 
 |  | 
 |   // See if we can choose a smaller ABI alignment in cases where it's an | 
 |   // illegal vector type that will get broken down. | 
 |   if (RedAlign > StackAlign) { | 
 |     EVT IntermediateVT; | 
 |     MVT RegisterVT; | 
 |     unsigned NumIntermediates; | 
 |     TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT, | 
 |                                 NumIntermediates, RegisterVT); | 
 |     Ty = IntermediateVT.getTypeForEVT(*getContext()); | 
 |     Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty); | 
 |     if (RedAlign2 < RedAlign) | 
 |       RedAlign = RedAlign2; | 
 |   } | 
 |  | 
 |   return RedAlign; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) { | 
 |   MachineFrameInfo &MFI = MF->getFrameInfo(); | 
 |   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering(); | 
 |   int StackID = 0; | 
 |   if (Bytes.isScalable()) | 
 |     StackID = TFI->getStackIDForScalableVectors(); | 
 |   // The stack id gives an indication of whether the object is scalable or | 
 |   // not, so it's safe to pass in the minimum size here. | 
 |   int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinValue(), Alignment, | 
 |                                        false, nullptr, StackID); | 
 |   return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout())); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { | 
 |   Type *Ty = VT.getTypeForEVT(*getContext()); | 
 |   Align StackAlign = | 
 |       std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign)); | 
 |   return CreateStackTemporary(VT.getStoreSize(), StackAlign); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { | 
 |   TypeSize VT1Size = VT1.getStoreSize(); | 
 |   TypeSize VT2Size = VT2.getStoreSize(); | 
 |   assert(VT1Size.isScalable() == VT2Size.isScalable() && | 
 |          "Don't know how to choose the maximum size when creating a stack " | 
 |          "temporary"); | 
 |   TypeSize Bytes = VT1Size.getKnownMinValue() > VT2Size.getKnownMinValue() | 
 |                        ? VT1Size | 
 |                        : VT2Size; | 
 |  | 
 |   Type *Ty1 = VT1.getTypeForEVT(*getContext()); | 
 |   Type *Ty2 = VT2.getTypeForEVT(*getContext()); | 
 |   const DataLayout &DL = getDataLayout(); | 
 |   Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2)); | 
 |   return CreateStackTemporary(Bytes, Align); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2, | 
 |                                 ISD::CondCode Cond, const SDLoc &dl) { | 
 |   EVT OpVT = N1.getValueType(); | 
 |  | 
 |   auto GetUndefBooleanConstant = [&]() { | 
 |     if (VT.getScalarType() == MVT::i1 || | 
 |         TLI->getBooleanContents(OpVT) == | 
 |             TargetLowering::UndefinedBooleanContent) | 
 |       return getUNDEF(VT); | 
 |     // ZeroOrOne / ZeroOrNegative require specific values for the high bits, | 
 |     // so we cannot use getUNDEF(). Return zero instead. | 
 |     return getConstant(0, dl, VT); | 
 |   }; | 
 |  | 
 |   // These setcc operations always fold. | 
 |   switch (Cond) { | 
 |   default: break; | 
 |   case ISD::SETFALSE: | 
 |   case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT); | 
 |   case ISD::SETTRUE: | 
 |   case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT); | 
 |  | 
 |   case ISD::SETOEQ: | 
 |   case ISD::SETOGT: | 
 |   case ISD::SETOGE: | 
 |   case ISD::SETOLT: | 
 |   case ISD::SETOLE: | 
 |   case ISD::SETONE: | 
 |   case ISD::SETO: | 
 |   case ISD::SETUO: | 
 |   case ISD::SETUEQ: | 
 |   case ISD::SETUNE: | 
 |     assert(!OpVT.isInteger() && "Illegal setcc for integer!"); | 
 |     break; | 
 |   } | 
 |  | 
 |   if (OpVT.isInteger()) { | 
 |     // For EQ and NE, we can always pick a value for the undef to make the | 
 |     // predicate pass or fail, so we can return undef. | 
 |     // Matches behavior in llvm::ConstantFoldCompareInstruction. | 
 |     // icmp eq/ne X, undef -> undef. | 
 |     if ((N1.isUndef() || N2.isUndef()) && | 
 |         (Cond == ISD::SETEQ || Cond == ISD::SETNE)) | 
 |       return GetUndefBooleanConstant(); | 
 |  | 
 |     // If both operands are undef, we can return undef for int comparison. | 
 |     // icmp undef, undef -> undef. | 
 |     if (N1.isUndef() && N2.isUndef()) | 
 |       return GetUndefBooleanConstant(); | 
 |  | 
 |     // icmp X, X -> true/false | 
 |     // icmp X, undef -> true/false because undef could be X. | 
 |     if (N1.isUndef() || N2.isUndef() || N1 == N2) | 
 |       return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT); | 
 |   } | 
 |  | 
 |   if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) { | 
 |     const APInt &C2 = N2C->getAPIntValue(); | 
 |     if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) { | 
 |       const APInt &C1 = N1C->getAPIntValue(); | 
 |  | 
 |       return getBoolConstant(ICmpInst::compare(C1, C2, getICmpCondCode(Cond)), | 
 |                              dl, VT, OpVT); | 
 |     } | 
 |   } | 
 |  | 
 |   auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); | 
 |   auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2); | 
 |  | 
 |   if (N1CFP && N2CFP) { | 
 |     APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF()); | 
 |     switch (Cond) { | 
 |     default: break; | 
 |     case ISD::SETEQ:  if (R==APFloat::cmpUnordered) | 
 |                         return GetUndefBooleanConstant(); | 
 |                       [[fallthrough]]; | 
 |     case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETNE:  if (R==APFloat::cmpUnordered) | 
 |                         return GetUndefBooleanConstant(); | 
 |                       [[fallthrough]]; | 
 |     case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan || | 
 |                                              R==APFloat::cmpLessThan, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETLT:  if (R==APFloat::cmpUnordered) | 
 |                         return GetUndefBooleanConstant(); | 
 |                       [[fallthrough]]; | 
 |     case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETGT:  if (R==APFloat::cmpUnordered) | 
 |                         return GetUndefBooleanConstant(); | 
 |                       [[fallthrough]]; | 
 |     case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl, | 
 |                                              VT, OpVT); | 
 |     case ISD::SETLE:  if (R==APFloat::cmpUnordered) | 
 |                         return GetUndefBooleanConstant(); | 
 |                       [[fallthrough]]; | 
 |     case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan || | 
 |                                              R==APFloat::cmpEqual, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETGE:  if (R==APFloat::cmpUnordered) | 
 |                         return GetUndefBooleanConstant(); | 
 |                       [[fallthrough]]; | 
    case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
                                             R==APFloat::cmpEqual, dl, VT,
                                             OpVT);
 |     case ISD::SETO:   return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETUO:  return getBoolConstant(R==APFloat::cmpUnordered, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered || | 
 |                                              R==APFloat::cmpEqual, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered || | 
 |                                              R==APFloat::cmpLessThan, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan || | 
 |                                              R==APFloat::cmpUnordered, dl, VT, | 
 |                                              OpVT); | 
 |     case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl, | 
 |                                              VT, OpVT); | 
 |     case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT, | 
 |                                              OpVT); | 
 |     } | 
 |   } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) { | 
 |     // Ensure that the constant occurs on the RHS. | 
 |     ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond); | 
 |     if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT())) | 
 |       return SDValue(); | 
 |     return getSetCC(dl, VT, N2, N1, SwappedCond); | 
 |   } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) || | 
 |              (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) { | 
 |     // If an operand is known to be a nan (or undef that could be a nan), we can | 
 |     // fold it. | 
 |     // Choosing NaN for the undef will always make unordered comparison succeed | 
 |     // and ordered comparison fails. | 
 |     // Matches behavior in llvm::ConstantFoldCompareInstruction. | 
 |     switch (ISD::getUnorderedFlavor(Cond)) { | 
 |     default: | 
 |       llvm_unreachable("Unknown flavor!"); | 
 |     case 0: // Known false. | 
 |       return getBoolConstant(false, dl, VT, OpVT); | 
 |     case 1: // Known true. | 
 |       return getBoolConstant(true, dl, VT, OpVT); | 
 |     case 2: // Undefined. | 
 |       return GetUndefBooleanConstant(); | 
 |     } | 
 |   } | 
 |  | 
 |   // Could not fold it. | 
 |   return SDValue(); | 
 | } | 
 |  | 
 | /// SignBitIsZero - Return true if the sign bit of Op is known to be zero.  We | 
 | /// use this predicate to simplify operations downstream. | 
 | bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { | 
 |   unsigned BitWidth = Op.getScalarValueSizeInBits(); | 
 |   return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth); | 
 | } | 
 |  | 
 | /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use | 
 | /// this predicate to simplify operations downstream.  Mask is known to be zero | 
 | /// for bits that V cannot have. | 
 | bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, | 
 |                                      unsigned Depth) const { | 
 |   return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero); | 
 | } | 
 |  | 
 | /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in | 
 | /// DemandedElts.  We use this predicate to simplify operations downstream. | 
 | /// Mask is known to be zero for bits that V cannot have. | 
 | bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask, | 
 |                                      const APInt &DemandedElts, | 
 |                                      unsigned Depth) const { | 
 |   return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero); | 
 | } | 
 |  | 
/// MaskedVectorIsZero - Return true if 'V' is known to be zero in
 | /// DemandedElts.  We use this predicate to simplify operations downstream. | 
 | bool SelectionDAG::MaskedVectorIsZero(SDValue V, const APInt &DemandedElts, | 
 |                                       unsigned Depth /* = 0 */) const { | 
 |   return computeKnownBits(V, DemandedElts, Depth).isZero(); | 
 | } | 
 |  | 
 | /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'. | 
 | bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask, | 
 |                                         unsigned Depth) const { | 
 |   return Mask.isSubsetOf(computeKnownBits(V, Depth).One); | 
 | } | 
 |  | 
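/// Return a bitmask with one bit per lane of \p Op; a set bit means the
/// corresponding demanded element is known to be zero.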
 | APInt SelectionDAG::computeVectorKnownZeroElements(SDValue Op, | 
 |                                                    const APInt &DemandedElts, | 
 |                                                    unsigned Depth) const { | 
 |   EVT VT = Op.getValueType(); | 
 |   assert(VT.isVector() && !VT.isScalableVector() && "Only for fixed vectors!"); | 
 |  | 
 |   unsigned NumElts = VT.getVectorNumElements(); | 
 |   assert(DemandedElts.getBitWidth() == NumElts && "Unexpected demanded mask."); | 
 |  | 
 |   APInt KnownZeroElements = APInt::getZero(NumElts); | 
 |   for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) { | 
 |     if (!DemandedElts[EltIdx]) | 
 |       continue; // Don't query elements that are not demanded. | 
 |     APInt Mask = APInt::getOneBitSet(NumElts, EltIdx); | 
 |     if (MaskedVectorIsZero(Op, Mask, Depth)) | 
 |       KnownZeroElements.setBit(EltIdx); | 
 |   } | 
 |   return KnownZeroElements; | 
 | } | 
 |  | 
 | /// isSplatValue - Return true if the vector V has the same value | 
 | /// across all DemandedElts. For scalable vectors, we don't know the | 
 | /// number of lanes at compile time.  Instead, we use a 1 bit APInt | 
 | /// to represent a conservative value for all lanes; that is, that | 
 | /// one bit value is implicitly splatted across all lanes. | 
 | bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts, | 
 |                                 APInt &UndefElts, unsigned Depth) const { | 
 |   unsigned Opcode = V.getOpcode(); | 
 |   EVT VT = V.getValueType(); | 
 |   assert(VT.isVector() && "Vector type expected"); | 
 |   assert((!VT.isScalableVector() || DemandedElts.getBitWidth() == 1) && | 
 |          "scalable demanded bits are ignored"); | 
 |  | 
 |   if (!DemandedElts) | 
 |     return false; // No demanded elts, better to assume we don't know anything. | 
 |  | 
 |   if (Depth >= MaxRecursionDepth) | 
 |     return false; // Limit search depth. | 
 |  | 
 |   // Deal with some common cases here that work for both fixed and scalable | 
 |   // vector types. | 
 |   switch (Opcode) { | 
 |   case ISD::SPLAT_VECTOR: | 
 |     UndefElts = V.getOperand(0).isUndef() | 
 |                     ? APInt::getAllOnes(DemandedElts.getBitWidth()) | 
 |                     : APInt(DemandedElts.getBitWidth(), 0); | 
 |     return true; | 
 |   case ISD::ADD: | 
 |   case ISD::SUB: | 
 |   case ISD::AND: | 
 |   case ISD::XOR: | 
 |   case ISD::OR: { | 
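    // A binary op of two splats is itself a splat; the result's undef lanes
    // are the union of the undef lanes of the two sources.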
 |     APInt UndefLHS, UndefRHS; | 
 |     SDValue LHS = V.getOperand(0); | 
 |     SDValue RHS = V.getOperand(1); | 
 |     if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) && | 
 |         isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) { | 
 |       UndefElts = UndefLHS | UndefRHS; | 
 |       return true; | 
 |     } | 
 |     return false; | 
 |   } | 
 |   case ISD::ABS: | 
 |   case ISD::TRUNCATE: | 
 |   case ISD::SIGN_EXTEND: | 
 |   case ISD::ZERO_EXTEND: | 
 |     return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1); | 
 |   default: | 
 |     if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN || | 
 |         Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) | 
 |       return TLI->isSplatValueForTargetNode(V, DemandedElts, UndefElts, *this, | 
 |                                             Depth); | 
 |     break; | 
 | } | 
 |  | 
  // For scalable vectors, we only support the cases handled above at the
  // moment.
 |   if (VT.isScalableVector()) | 
 |     return false; | 
 |  | 
 |   unsigned NumElts = VT.getVectorNumElements(); | 
 |   assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch"); | 
 |   UndefElts = APInt::getZero(NumElts); | 
 |  | 
 |   switch (Opcode) { | 
 |   case ISD::BUILD_VECTOR: { | 
 |     SDValue Scl; | 
 |     for (unsigned i = 0; i != NumElts; ++i) { | 
 |       SDValue Op = V.getOperand(i); | 
 |       if (Op.isUndef()) { | 
 |         UndefElts.setBit(i); | 
 |         continue; | 
 |       } | 
 |       if (!DemandedElts[i]) | 
 |         continue; | 
 |       if (Scl && Scl != Op) | 
 |         return false; | 
 |       Scl = Op; | 
 |     } | 
 |     return true; | 
 |   } | 
 |   case ISD::VECTOR_SHUFFLE: { | 
 |     // Check if this is a shuffle node doing a splat or a shuffle of a splat. | 
 |     APInt DemandedLHS = APInt::getZero(NumElts); | 
 |     APInt DemandedRHS = APInt::getZero(NumElts); | 
 |     ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask(); | 
 |     for (int i = 0; i != (int)NumElts; ++i) { | 
 |       int M = Mask[i]; | 
 |       if (M < 0) { | 
 |         UndefElts.setBit(i); | 
 |         continue; | 
 |       } | 
 |       if (!DemandedElts[i]) | 
 |         continue; | 
 |       if (M < (int)NumElts) | 
 |         DemandedLHS.setBit(M); | 
 |       else | 
 |         DemandedRHS.setBit(M - NumElts); | 
 |     } | 
 |  | 
    // Assume there is no splat unless we are demanding elements from exactly
    // one of the two operands.
 |     if ((DemandedLHS.isZero() && DemandedRHS.isZero()) || | 
 |         (!DemandedLHS.isZero() && !DemandedRHS.isZero())) | 
 |       return false; | 
 |  | 
    // See if the demanded elts of the source op form a splat, or if we only
    // demand one element, which is trivially a splat.
 |     // TODO: Handle source ops splats with undefs. | 
 |     auto CheckSplatSrc = [&](SDValue Src, const APInt &SrcElts) { | 
 |       APInt SrcUndefs; | 
 |       return (SrcElts.popcount() == 1) || | 
 |              (isSplatValue(Src, SrcElts, SrcUndefs, Depth + 1) && | 
 |               (SrcElts & SrcUndefs).isZero()); | 
 |     }; | 
 |     if (!DemandedLHS.isZero()) | 
 |       return CheckSplatSrc(V.getOperand(0), DemandedLHS); | 
 |     return CheckSplatSrc(V.getOperand(1), DemandedRHS); | 
 |   } | 
 |   case ISD::EXTRACT_SUBVECTOR: { | 
 |     // Offset the demanded elts by the subvector index. | 
 |     SDValue Src = V.getOperand(0); | 
 |     // We don't support scalable vectors at the moment. | 
 |     if (Src.getValueType().isScalableVector()) | 
 |       return false; | 
 |     uint64_t Idx = V.getConstantOperandVal(1); | 
 |     unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); | 
 |     APInt UndefSrcElts; | 
 |     APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx); | 
 |     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) { | 
 |       UndefElts = UndefSrcElts.extractBits(NumElts, Idx); | 
 |       return true; | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::ANY_EXTEND_VECTOR_INREG: | 
 |   case ISD::SIGN_EXTEND_VECTOR_INREG: | 
 |   case ISD::ZERO_EXTEND_VECTOR_INREG: { | 
 |     // Widen the demanded elts by the src element count. | 
 |     SDValue Src = V.getOperand(0); | 
 |     // We don't support scalable vectors at the moment. | 
 |     if (Src.getValueType().isScalableVector()) | 
 |       return false; | 
 |     unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); | 
 |     APInt UndefSrcElts; | 
 |     APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts); | 
 |     if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) { | 
 |       UndefElts = UndefSrcElts.trunc(NumElts); | 
 |       return true; | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::BITCAST: { | 
 |     SDValue Src = V.getOperand(0); | 
 |     EVT SrcVT = Src.getValueType(); | 
 |     unsigned SrcBitWidth = SrcVT.getScalarSizeInBits(); | 
 |     unsigned BitWidth = VT.getScalarSizeInBits(); | 
 |  | 
 |     // Ignore bitcasts from unsupported types. | 
 |     // TODO: Add fp support? | 
 |     if (!SrcVT.isVector() || !SrcVT.isInteger() || !VT.isInteger()) | 
 |       break; | 
 |  | 
 |     // Bitcast 'small element' vector to 'large element' vector. | 
 |     if ((BitWidth % SrcBitWidth) == 0) { | 
 |       // See if each sub element is a splat. | 
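      // e.g. (illustrative) for a v4i32 -> v2i64 bitcast (Scale = 2), the
      // even i32 elements and the odd i32 elements must each form a splat.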
 |       unsigned Scale = BitWidth / SrcBitWidth; | 
 |       unsigned NumSrcElts = SrcVT.getVectorNumElements(); | 
 |       APInt ScaledDemandedElts = | 
 |           APIntOps::ScaleBitMask(DemandedElts, NumSrcElts); | 
 |       for (unsigned I = 0; I != Scale; ++I) { | 
 |         APInt SubUndefElts; | 
 |         APInt SubDemandedElt = APInt::getOneBitSet(Scale, I); | 
 |         APInt SubDemandedElts = APInt::getSplat(NumSrcElts, SubDemandedElt); | 
 |         SubDemandedElts &= ScaledDemandedElts; | 
 |         if (!isSplatValue(Src, SubDemandedElts, SubUndefElts, Depth + 1)) | 
 |           return false; | 
 |         // TODO: Add support for merging sub undef elements. | 
 |         if (!SubUndefElts.isZero()) | 
 |           return false; | 
 |       } | 
 |       return true; | 
 |     } | 
 |     break; | 
 |   } | 
 |   } | 
 |  | 
 |   // Fallback - this is a splat if all demanded elts are the same constant. | 
 |   if (computeKnownBits(V, DemandedElts, Depth).isConstant()) { | 
 |     UndefElts = ~DemandedElts; | 
 |     return true; | 
 |   } | 
 |  | 
 |   return false; | 
 | } | 
 |  | 
/// Helper wrapper for the main isSplatValue function.
 | bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) const { | 
 |   EVT VT = V.getValueType(); | 
 |   assert(VT.isVector() && "Vector type expected"); | 
 |  | 
 |   APInt UndefElts; | 
 |   // Since the number of lanes in a scalable vector is unknown at compile time, | 
 |   // we track one bit which is implicitly broadcast to all lanes.  This means | 
 |   // that all lanes in a scalable vector are considered demanded. | 
 |   APInt DemandedElts | 
 |     = APInt::getAllOnes(VT.isScalableVector() ? 1 : VT.getVectorNumElements()); | 
 |   return isSplatValue(V, DemandedElts, UndefElts) && | 
 |          (AllowUndefs || !UndefElts); | 
 | } | 
 |  | 
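/// If V is a splat, return a vector in which lane SplatIdx holds the splatted
/// value; otherwise return an empty SDValue.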
 | SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) { | 
 |   V = peekThroughExtractSubvectors(V); | 
 |  | 
 |   EVT VT = V.getValueType(); | 
 |   unsigned Opcode = V.getOpcode(); | 
 |   switch (Opcode) { | 
 |   default: { | 
 |     APInt UndefElts; | 
    // Since the number of lanes in a scalable vector is unknown at compile
    // time, we track one bit which is implicitly broadcast to all lanes.  This
    // means that all lanes in a scalable vector are considered demanded.
    APInt DemandedElts = APInt::getAllOnes(
        VT.isScalableVector() ? 1 : VT.getVectorNumElements());
 |  | 
 |     if (isSplatValue(V, DemandedElts, UndefElts)) { | 
 |       if (VT.isScalableVector()) { | 
 |         // DemandedElts and UndefElts are ignored for scalable vectors, since | 
 |         // the only supported cases are SPLAT_VECTOR nodes. | 
 |         SplatIdx = 0; | 
 |       } else { | 
 |         // Handle case where all demanded elements are UNDEF. | 
 |         if (DemandedElts.isSubsetOf(UndefElts)) { | 
 |           SplatIdx = 0; | 
 |           return getUNDEF(VT); | 
 |         } | 
 |         SplatIdx = (UndefElts & DemandedElts).countr_one(); | 
 |       } | 
 |       return V; | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::SPLAT_VECTOR: | 
 |     SplatIdx = 0; | 
 |     return V; | 
 |   case ISD::VECTOR_SHUFFLE: { | 
 |     assert(!VT.isScalableVector()); | 
 |     // Check if this is a shuffle node doing a splat. | 
 |     // TODO - remove this and rely purely on SelectionDAG::isSplatValue, | 
 |     // getTargetVShiftNode currently struggles without the splat source. | 
 |     auto *SVN = cast<ShuffleVectorSDNode>(V); | 
 |     if (!SVN->isSplat()) | 
 |       break; | 
 |     int Idx = SVN->getSplatIndex(); | 
 |     int NumElts = V.getValueType().getVectorNumElements(); | 
 |     SplatIdx = Idx % NumElts; | 
 |     return V.getOperand(Idx / NumElts); | 
 |   } | 
 |   } | 
 |  | 
 |   return SDValue(); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) { | 
 |   int SplatIdx; | 
 |   if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) { | 
 |     EVT SVT = SrcVector.getValueType().getScalarType(); | 
 |     EVT LegalSVT = SVT; | 
 |     if (LegalTypes && !TLI->isTypeLegal(SVT)) { | 
 |       if (!SVT.isInteger()) | 
 |         return SDValue(); | 
 |       LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); | 
 |       if (LegalSVT.bitsLT(SVT)) | 
 |         return SDValue(); | 
 |     } | 
 |     return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector, | 
 |                    getVectorIdxConstant(SplatIdx, SDLoc(V))); | 
 |   } | 
 |   return SDValue(); | 
 | } | 
 |  | 
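/// If the shift amount of V (a SHL/SRL/SRA node) is a constant or constant
/// splat that is strictly less than the element bitwidth, return it;
/// otherwise return null.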
 | const APInt * | 
 | SelectionDAG::getValidShiftAmountConstant(SDValue V, | 
 |                                           const APInt &DemandedElts) const { | 
 |   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || | 
 |           V.getOpcode() == ISD::SRA) && | 
 |          "Unknown shift node"); | 
 |   unsigned BitWidth = V.getScalarValueSizeInBits(); | 
 |   if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) { | 
 |     // Shifting more than the bitwidth is not valid. | 
 |     const APInt &ShAmt = SA->getAPIntValue(); | 
 |     if (ShAmt.ult(BitWidth)) | 
 |       return &ShAmt; | 
 |   } | 
 |   return nullptr; | 
 | } | 
 |  | 
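/// Return the smallest valid constant shift amount across all demanded
/// elements of a SHL/SRL/SRA node, or null if any demanded element's shift
/// amount is non-constant or not less than the bitwidth.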
 | const APInt *SelectionDAG::getValidMinimumShiftAmountConstant( | 
 |     SDValue V, const APInt &DemandedElts) const { | 
 |   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || | 
 |           V.getOpcode() == ISD::SRA) && | 
 |          "Unknown shift node"); | 
 |   if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) | 
 |     return ValidAmt; | 
 |   unsigned BitWidth = V.getScalarValueSizeInBits(); | 
 |   auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); | 
 |   if (!BV) | 
 |     return nullptr; | 
 |   const APInt *MinShAmt = nullptr; | 
 |   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { | 
 |     if (!DemandedElts[i]) | 
 |       continue; | 
 |     auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); | 
 |     if (!SA) | 
 |       return nullptr; | 
 |     // Shifting more than the bitwidth is not valid. | 
 |     const APInt &ShAmt = SA->getAPIntValue(); | 
 |     if (ShAmt.uge(BitWidth)) | 
 |       return nullptr; | 
 |     if (MinShAmt && MinShAmt->ule(ShAmt)) | 
 |       continue; | 
 |     MinShAmt = &ShAmt; | 
 |   } | 
 |   return MinShAmt; | 
 | } | 
 |  | 
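/// Return the largest valid constant shift amount across all demanded
/// elements of a SHL/SRL/SRA node, or null if any demanded element's shift
/// amount is non-constant or not less than the bitwidth.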
 | const APInt *SelectionDAG::getValidMaximumShiftAmountConstant( | 
 |     SDValue V, const APInt &DemandedElts) const { | 
 |   assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL || | 
 |           V.getOpcode() == ISD::SRA) && | 
 |          "Unknown shift node"); | 
 |   if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts)) | 
 |     return ValidAmt; | 
 |   unsigned BitWidth = V.getScalarValueSizeInBits(); | 
 |   auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1)); | 
 |   if (!BV) | 
 |     return nullptr; | 
 |   const APInt *MaxShAmt = nullptr; | 
 |   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { | 
 |     if (!DemandedElts[i]) | 
 |       continue; | 
 |     auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i)); | 
 |     if (!SA) | 
 |       return nullptr; | 
 |     // Shifting more than the bitwidth is not valid. | 
 |     const APInt &ShAmt = SA->getAPIntValue(); | 
 |     if (ShAmt.uge(BitWidth)) | 
 |       return nullptr; | 
 |     if (MaxShAmt && MaxShAmt->uge(ShAmt)) | 
 |       continue; | 
 |     MaxShAmt = &ShAmt; | 
 |   } | 
 |   return MaxShAmt; | 
 | } | 
 |  | 
 | /// Determine which bits of Op are known to be either zero or one and return | 
 | /// them in Known. For vectors, the known bits are those that are shared by | 
 | /// every vector element. | 
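/// For example (illustrative), for (and X, 0xFF) with 32-bit operands, bits
/// 8..31 of the result are known zero regardless of X.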
 | KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const { | 
 |   EVT VT = Op.getValueType(); | 
 |  | 
 |   // Since the number of lanes in a scalable vector is unknown at compile time, | 
 |   // we track one bit which is implicitly broadcast to all lanes.  This means | 
 |   // that all lanes in a scalable vector are considered demanded. | 
 |   APInt DemandedElts = VT.isFixedLengthVector() | 
 |                            ? APInt::getAllOnes(VT.getVectorNumElements()) | 
 |                            : APInt(1, 1); | 
 |   return computeKnownBits(Op, DemandedElts, Depth); | 
 | } | 
 |  | 
 | /// Determine which bits of Op are known to be either zero or one and return | 
 | /// them in Known. The DemandedElts argument allows us to only collect the known | 
 | /// bits that are shared by the requested vector elements. | 
 | KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts, | 
 |                                          unsigned Depth) const { | 
 |   unsigned BitWidth = Op.getScalarValueSizeInBits(); | 
 |  | 
 |   KnownBits Known(BitWidth);   // Don't know anything. | 
 |  | 
 |   if (auto *C = dyn_cast<ConstantSDNode>(Op)) { | 
 |     // We know all of the bits for a constant! | 
 |     return KnownBits::makeConstant(C->getAPIntValue()); | 
 |   } | 
 |   if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) { | 
 |     // We know all of the bits for a constant fp! | 
 |     return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt()); | 
 |   } | 
 |  | 
 |   if (Depth >= MaxRecursionDepth) | 
 |     return Known;  // Limit search depth. | 
 |  | 
 |   KnownBits Known2; | 
 |   unsigned NumElts = DemandedElts.getBitWidth(); | 
 |   assert((!Op.getValueType().isFixedLengthVector() || | 
 |           NumElts == Op.getValueType().getVectorNumElements()) && | 
 |          "Unexpected vector size"); | 
 |  | 
 |   if (!DemandedElts) | 
 |     return Known;  // No demanded elts, better to assume we don't know anything. | 
 |  | 
 |   unsigned Opcode = Op.getOpcode(); | 
 |   switch (Opcode) { | 
 |   case ISD::MERGE_VALUES: | 
 |     return computeKnownBits(Op.getOperand(Op.getResNo()), DemandedElts, | 
 |                             Depth + 1); | 
 |   case ISD::SPLAT_VECTOR: { | 
 |     SDValue SrcOp = Op.getOperand(0); | 
 |     assert(SrcOp.getValueSizeInBits() >= BitWidth && | 
 |            "Expected SPLAT_VECTOR implicit truncation"); | 
 |     // Implicitly truncate the bits to match the official semantics of | 
 |     // SPLAT_VECTOR. | 
 |     Known = computeKnownBits(SrcOp, Depth + 1).trunc(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::SPLAT_VECTOR_PARTS: { | 
 |     unsigned ScalarSize = Op.getOperand(0).getScalarValueSizeInBits(); | 
 |     assert(ScalarSize * Op.getNumOperands() == BitWidth && | 
 |            "Expected SPLAT_VECTOR_PARTS scalars to cover element width"); | 
 |     for (auto [I, SrcOp] : enumerate(Op->ops())) { | 
 |       Known.insertBits(computeKnownBits(SrcOp, Depth + 1), ScalarSize * I); | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::BUILD_VECTOR: | 
 |     assert(!Op.getValueType().isScalableVector()); | 
 |     // Collect the known bits that are shared by every demanded vector element. | 
 |     Known.Zero.setAllBits(); Known.One.setAllBits(); | 
 |     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) { | 
 |       if (!DemandedElts[i]) | 
 |         continue; | 
 |  | 
 |       SDValue SrcOp = Op.getOperand(i); | 
 |       Known2 = computeKnownBits(SrcOp, Depth + 1); | 
 |  | 
      // BUILD_VECTOR can implicitly truncate sources; we must handle this.
 |       if (SrcOp.getValueSizeInBits() != BitWidth) { | 
 |         assert(SrcOp.getValueSizeInBits() > BitWidth && | 
 |                "Expected BUILD_VECTOR implicit truncation"); | 
 |         Known2 = Known2.trunc(BitWidth); | 
 |       } | 
 |  | 
 |       // Known bits are the values that are shared by every demanded element. | 
 |       Known = Known.intersectWith(Known2); | 
 |  | 
 |       // If we don't know any bits, early out. | 
 |       if (Known.isUnknown()) | 
 |         break; | 
 |     } | 
 |     break; | 
 |   case ISD::VECTOR_SHUFFLE: { | 
 |     assert(!Op.getValueType().isScalableVector()); | 
    // Collect the known bits that are shared by every vector element
    // referenced by the shuffle.
 |     APInt DemandedLHS, DemandedRHS; | 
 |     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); | 
 |     assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); | 
 |     if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts, | 
 |                                 DemandedLHS, DemandedRHS)) | 
 |       break; | 
 |  | 
 |     // Known bits are the values that are shared by every demanded element. | 
 |     Known.Zero.setAllBits(); Known.One.setAllBits(); | 
 |     if (!!DemandedLHS) { | 
 |       SDValue LHS = Op.getOperand(0); | 
 |       Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1); | 
 |       Known = Known.intersectWith(Known2); | 
 |     } | 
 |     // If we don't know any bits, early out. | 
 |     if (Known.isUnknown()) | 
 |       break; | 
 |     if (!!DemandedRHS) { | 
 |       SDValue RHS = Op.getOperand(1); | 
 |       Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1); | 
 |       Known = Known.intersectWith(Known2); | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::VSCALE: { | 
 |     const Function &F = getMachineFunction().getFunction(); | 
 |     const APInt &Multiplier = Op.getConstantOperandAPInt(0); | 
 |     Known = getVScaleRange(&F, BitWidth).multiply(Multiplier).toKnownBits(); | 
 |     break; | 
 |   } | 
 |   case ISD::CONCAT_VECTORS: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
 |     // Split DemandedElts and test each of the demanded subvectors. | 
 |     Known.Zero.setAllBits(); Known.One.setAllBits(); | 
 |     EVT SubVectorVT = Op.getOperand(0).getValueType(); | 
 |     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); | 
 |     unsigned NumSubVectors = Op.getNumOperands(); | 
 |     for (unsigned i = 0; i != NumSubVectors; ++i) { | 
 |       APInt DemandedSub = | 
 |           DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts); | 
 |       if (!!DemandedSub) { | 
 |         SDValue Sub = Op.getOperand(i); | 
 |         Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1); | 
 |         Known = Known.intersectWith(Known2); | 
 |       } | 
 |       // If we don't know any bits, early out. | 
 |       if (Known.isUnknown()) | 
 |         break; | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::INSERT_SUBVECTOR: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
    // Demand any elements from the subvector and the remainder from the src
    // it is inserted into.
 |     SDValue Src = Op.getOperand(0); | 
 |     SDValue Sub = Op.getOperand(1); | 
 |     uint64_t Idx = Op.getConstantOperandVal(2); | 
 |     unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); | 
 |     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); | 
 |     APInt DemandedSrcElts = DemandedElts; | 
 |     DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx); | 
 |  | 
 |     Known.One.setAllBits(); | 
 |     Known.Zero.setAllBits(); | 
 |     if (!!DemandedSubElts) { | 
 |       Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1); | 
 |       if (Known.isUnknown()) | 
 |         break; // early-out. | 
 |     } | 
 |     if (!!DemandedSrcElts) { | 
 |       Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1); | 
 |       Known = Known.intersectWith(Known2); | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::EXTRACT_SUBVECTOR: { | 
 |     // Offset the demanded elts by the subvector index. | 
 |     SDValue Src = Op.getOperand(0); | 
 |     // Bail until we can represent demanded elements for scalable vectors. | 
    if (Op.getValueType().isScalableVector() ||
        Src.getValueType().isScalableVector())
 |       break; | 
 |     uint64_t Idx = Op.getConstantOperandVal(1); | 
 |     unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); | 
 |     APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx); | 
 |     Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1); | 
 |     break; | 
 |   } | 
 |   case ISD::SCALAR_TO_VECTOR: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
    // We know as much about scalar_to_vector as we know about its source,
    // which becomes the first element of an otherwise unknown vector.
 |     if (DemandedElts != 1) | 
 |       break; | 
 |  | 
 |     SDValue N0 = Op.getOperand(0); | 
 |     Known = computeKnownBits(N0, Depth + 1); | 
 |     if (N0.getValueSizeInBits() != BitWidth) | 
 |       Known = Known.trunc(BitWidth); | 
 |  | 
 |     break; | 
 |   } | 
 |   case ISD::BITCAST: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
 |  | 
 |     SDValue N0 = Op.getOperand(0); | 
 |     EVT SubVT = N0.getValueType(); | 
 |     unsigned SubBitWidth = SubVT.getScalarSizeInBits(); | 
 |  | 
 |     // Ignore bitcasts from unsupported types. | 
 |     if (!(SubVT.isInteger() || SubVT.isFloatingPoint())) | 
 |       break; | 
 |  | 
 |     // Fast handling of 'identity' bitcasts. | 
 |     if (BitWidth == SubBitWidth) { | 
 |       Known = computeKnownBits(N0, DemandedElts, Depth + 1); | 
 |       break; | 
 |     } | 
 |  | 
 |     bool IsLE = getDataLayout().isLittleEndian(); | 
 |  | 
 |     // Bitcast 'small element' vector to 'large element' scalar/vector. | 
 |     if ((BitWidth % SubBitWidth) == 0) { | 
 |       assert(N0.getValueType().isVector() && "Expected bitcast from vector"); | 
 |  | 
 |       // Collect known bits for the (larger) output by collecting the known | 
 |       // bits from each set of sub elements and shift these into place. | 
 |       // We need to separately call computeKnownBits for each set of | 
 |       // sub elements as the knownbits for each is likely to be different. | 
 |       unsigned SubScale = BitWidth / SubBitWidth; | 
 |       APInt SubDemandedElts(NumElts * SubScale, 0); | 
 |       for (unsigned i = 0; i != NumElts; ++i) | 
 |         if (DemandedElts[i]) | 
 |           SubDemandedElts.setBit(i * SubScale); | 
 |  | 
 |       for (unsigned i = 0; i != SubScale; ++i) { | 
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i), Depth + 1);
 |         unsigned Shifts = IsLE ? i : SubScale - 1 - i; | 
 |         Known.insertBits(Known2, SubBitWidth * Shifts); | 
 |       } | 
 |     } | 
 |  | 
 |     // Bitcast 'large element' scalar/vector to 'small element' vector. | 
 |     if ((SubBitWidth % BitWidth) == 0) { | 
 |       assert(Op.getValueType().isVector() && "Expected bitcast to vector"); | 
 |  | 
 |       // Collect known bits for the (smaller) output by collecting the known | 
 |       // bits from the overlapping larger input elements and extracting the | 
 |       // sub sections we actually care about. | 
 |       unsigned SubScale = SubBitWidth / BitWidth; | 
 |       APInt SubDemandedElts = | 
 |           APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale); | 
 |       Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1); | 
 |  | 
 |       Known.Zero.setAllBits(); Known.One.setAllBits(); | 
 |       for (unsigned i = 0; i != NumElts; ++i) | 
 |         if (DemandedElts[i]) { | 
 |           unsigned Shifts = IsLE ? i : NumElts - 1 - i; | 
 |           unsigned Offset = (Shifts % SubScale) * BitWidth; | 
 |           Known = Known.intersectWith(Known2.extractBits(BitWidth, Offset)); | 
 |           // If we don't know any bits, early out. | 
 |           if (Known.isUnknown()) | 
 |             break; | 
 |         } | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::AND: | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |  | 
 |     Known &= Known2; | 
 |     break; | 
 |   case ISD::OR: | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |  | 
 |     Known |= Known2; | 
 |     break; | 
 |   case ISD::XOR: | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |  | 
 |     Known ^= Known2; | 
 |     break; | 
 |   case ISD::MUL: { | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1); | 
 |     // TODO: SelfMultiply can be poison, but not undef. | 
 |     if (SelfMultiply) | 
 |       SelfMultiply &= isGuaranteedNotToBeUndefOrPoison( | 
 |           Op.getOperand(0), DemandedElts, false, Depth + 1); | 
 |     Known = KnownBits::mul(Known, Known2, SelfMultiply); | 
 |  | 
 |     // If the multiplication is known not to overflow, the product of a number | 
    // with itself is non-negative. Only do this if we didn't already compute
 |     // the opposite value for the sign bit. | 
 |     if (Op->getFlags().hasNoSignedWrap() && | 
 |         Op.getOperand(0) == Op.getOperand(1) && | 
 |         !Known.isNegative()) | 
 |       Known.makeNonNegative(); | 
 |     break; | 
 |   } | 
 |   case ISD::MULHU: { | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::mulhu(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::MULHS: { | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::mulhs(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::UMUL_LOHI: { | 
 |     assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result"); | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1); | 
 |     if (Op.getResNo() == 0) | 
 |       Known = KnownBits::mul(Known, Known2, SelfMultiply); | 
 |     else | 
 |       Known = KnownBits::mulhu(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::SMUL_LOHI: { | 
 |     assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result"); | 
 |     Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     bool SelfMultiply = Op.getOperand(0) == Op.getOperand(1); | 
 |     if (Op.getResNo() == 0) | 
 |       Known = KnownBits::mul(Known, Known2, SelfMultiply); | 
 |     else | 
 |       Known = KnownBits::mulhs(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::AVGCEILU: { | 
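    // avgceilu(A, B) computes (A + B + 1) >> 1; do the arithmetic in
    // BitWidth + 1 bits so the intermediate sum cannot wrap.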
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = Known.zext(BitWidth + 1); | 
 |     Known2 = Known2.zext(BitWidth + 1); | 
 |     KnownBits One = KnownBits::makeConstant(APInt(1, 1)); | 
 |     Known = KnownBits::computeForAddCarry(Known, Known2, One); | 
 |     Known = Known.extractBits(BitWidth, 1); | 
 |     break; | 
 |   } | 
 |   case ISD::SELECT: | 
 |   case ISD::VSELECT: | 
 |     Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); | 
 |     // If we don't know any bits, early out. | 
 |     if (Known.isUnknown()) | 
 |       break; | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1); | 
 |  | 
 |     // Only known if known in both the LHS and RHS. | 
 |     Known = Known.intersectWith(Known2); | 
 |     break; | 
 |   case ISD::SELECT_CC: | 
 |     Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1); | 
 |     // If we don't know any bits, early out. | 
 |     if (Known.isUnknown()) | 
 |       break; | 
 |     Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1); | 
 |  | 
 |     // Only known if known in both the LHS and RHS. | 
 |     Known = Known.intersectWith(Known2); | 
 |     break; | 
 |   case ISD::SMULO: | 
 |   case ISD::UMULO: | 
 |     if (Op.getResNo() != 1) | 
 |       break; | 
 |     // The boolean result conforms to getBooleanContents. | 
 |     // If we know the result of a setcc has the top bits zero, use this info. | 
    // We know that we have an integer-based boolean since these operations
    // are only available for integers.
 |     if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == | 
 |             TargetLowering::ZeroOrOneBooleanContent && | 
 |         BitWidth > 1) | 
 |       Known.Zero.setBitsFrom(1); | 
 |     break; | 
 |   case ISD::SETCC: | 
 |   case ISD::SETCCCARRY: | 
 |   case ISD::STRICT_FSETCC: | 
 |   case ISD::STRICT_FSETCCS: { | 
 |     unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; | 
 |     // If we know the result of a setcc has the top bits zero, use this info. | 
 |     if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == | 
 |             TargetLowering::ZeroOrOneBooleanContent && | 
 |         BitWidth > 1) | 
 |       Known.Zero.setBitsFrom(1); | 
 |     break; | 
 |   } | 
 |   case ISD::SHL: | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::shl(Known, Known2); | 
 |  | 
 |     // Minimum shift low bits are known zero. | 
 |     if (const APInt *ShMinAmt = | 
 |             getValidMinimumShiftAmountConstant(Op, DemandedElts)) | 
 |       Known.Zero.setLowBits(ShMinAmt->getZExtValue()); | 
 |     break; | 
 |   case ISD::SRL: | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::lshr(Known, Known2); | 
 |  | 
 |     // Minimum shift high bits are known zero. | 
 |     if (const APInt *ShMinAmt = | 
 |             getValidMinimumShiftAmountConstant(Op, DemandedElts)) | 
 |       Known.Zero.setHighBits(ShMinAmt->getZExtValue()); | 
 |     break; | 
 |   case ISD::SRA: | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::ashr(Known, Known2); | 
 |     break; | 
 |   case ISD::FSHL: | 
 |   case ISD::FSHR: | 
    if (ConstantSDNode *C =
            isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
 |       unsigned Amt = C->getAPIntValue().urem(BitWidth); | 
 |  | 
 |       // For fshl, 0-shift returns the 1st arg. | 
 |       // For fshr, 0-shift returns the 2nd arg. | 
 |       if (Amt == 0) { | 
 |         Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1), | 
 |                                  DemandedElts, Depth + 1); | 
 |         break; | 
 |       } | 
 |  | 
 |       // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) | 
 |       // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) | 
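      // e.g. (illustrative) for BitWidth = 8: fshl(X, Y, 3) is
      // (X << 3) | (Y >> 5) and fshr(X, Y, 3) is (X << 5) | (Y >> 3).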
 |       Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |       Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |       if (Opcode == ISD::FSHL) { | 
 |         Known.One <<= Amt; | 
 |         Known.Zero <<= Amt; | 
 |         Known2.One.lshrInPlace(BitWidth - Amt); | 
 |         Known2.Zero.lshrInPlace(BitWidth - Amt); | 
 |       } else { | 
 |         Known.One <<= BitWidth - Amt; | 
 |         Known.Zero <<= BitWidth - Amt; | 
 |         Known2.One.lshrInPlace(Amt); | 
 |         Known2.Zero.lshrInPlace(Amt); | 
 |       } | 
 |       Known = Known.unionWith(Known2); | 
 |     } | 
 |     break; | 
 |   case ISD::SHL_PARTS: | 
 |   case ISD::SRA_PARTS: | 
 |   case ISD::SRL_PARTS: { | 
 |     assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result"); | 
 |  | 
 |     // Collect lo/hi source values and concatenate. | 
 |     unsigned LoBits = Op.getOperand(0).getScalarValueSizeInBits(); | 
 |     unsigned HiBits = Op.getOperand(1).getScalarValueSizeInBits(); | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = Known2.concat(Known); | 
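    // Known now describes the double-width concatenation Hi:Lo.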
 |  | 
 |     // Collect shift amount. | 
 |     Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); | 
 |  | 
 |     if (Opcode == ISD::SHL_PARTS) | 
 |       Known = KnownBits::shl(Known, Known2); | 
 |     else if (Opcode == ISD::SRA_PARTS) | 
 |       Known = KnownBits::ashr(Known, Known2); | 
 |     else // if (Opcode == ISD::SRL_PARTS) | 
 |       Known = KnownBits::lshr(Known, Known2); | 
 |  | 
 |     // TODO: Minimum shift low/high bits are known zero. | 
 |  | 
 |     if (Op.getResNo() == 0) | 
 |       Known = Known.extractBits(LoBits, 0); | 
 |     else | 
 |       Known = Known.extractBits(HiBits, LoBits); | 
 |     break; | 
 |   } | 
 |   case ISD::SIGN_EXTEND_INREG: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
    EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    Known = Known.sextInReg(ExtVT.getScalarSizeInBits());
 |     break; | 
 |   } | 
 |   case ISD::CTTZ: | 
 |   case ISD::CTTZ_ZERO_UNDEF: { | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     // If we have a known 1, its position is our upper bound. | 
 |     unsigned PossibleTZ = Known2.countMaxTrailingZeros(); | 
 |     unsigned LowBits = llvm::bit_width(PossibleTZ); | 
 |     Known.Zero.setBitsFrom(LowBits); | 
 |     break; | 
 |   } | 
 |   case ISD::CTLZ: | 
 |   case ISD::CTLZ_ZERO_UNDEF: { | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     // If we have a known 1, its position is our upper bound. | 
 |     unsigned PossibleLZ = Known2.countMaxLeadingZeros(); | 
 |     unsigned LowBits = llvm::bit_width(PossibleLZ); | 
 |     Known.Zero.setBitsFrom(LowBits); | 
 |     break; | 
 |   } | 
 |   case ISD::CTPOP: { | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     // If we know some of the bits are zero, they can't be one. | 
 |     unsigned PossibleOnes = Known2.countMaxPopulation(); | 
 |     Known.Zero.setBitsFrom(llvm::bit_width(PossibleOnes)); | 
 |     break; | 
 |   } | 
 |   case ISD::PARITY: { | 
 |     // Parity returns 0 everywhere but the LSB. | 
 |     Known.Zero.setBitsFrom(1); | 
 |     break; | 
 |   } | 
 |   case ISD::LOAD: { | 
 |     LoadSDNode *LD = cast<LoadSDNode>(Op); | 
 |     const Constant *Cst = TLI->getTargetConstantFromLoad(LD); | 
 |     if (ISD::isNON_EXTLoad(LD) && Cst) { | 
 |       // Determine any common known bits from the loaded constant pool value. | 
 |       Type *CstTy = Cst->getType(); | 
 |       if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits() && | 
 |           !Op.getValueType().isScalableVector()) { | 
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
 |         // NOTE: We assume all elements match and none are UNDEF. | 
 |         if (CstTy->isVectorTy()) { | 
 |           if (const Constant *Splat = Cst->getSplatValue()) { | 
 |             Cst = Splat; | 
 |             CstTy = Cst->getType(); | 
 |           } | 
 |         } | 
 |         // TODO - do we need to handle different bitwidths? | 
 |         if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) { | 
 |           // Iterate across all vector elements finding common known bits. | 
 |           Known.One.setAllBits(); | 
 |           Known.Zero.setAllBits(); | 
 |           for (unsigned i = 0; i != NumElts; ++i) { | 
 |             if (!DemandedElts[i]) | 
 |               continue; | 
 |             if (Constant *Elt = Cst->getAggregateElement(i)) { | 
 |               if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { | 
 |                 const APInt &Value = CInt->getValue(); | 
 |                 Known.One &= Value; | 
 |                 Known.Zero &= ~Value; | 
 |                 continue; | 
 |               } | 
 |               if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { | 
 |                 APInt Value = CFP->getValueAPF().bitcastToAPInt(); | 
 |                 Known.One &= Value; | 
 |                 Known.Zero &= ~Value; | 
 |                 continue; | 
 |               } | 
 |             } | 
 |             Known.One.clearAllBits(); | 
 |             Known.Zero.clearAllBits(); | 
 |             break; | 
 |           } | 
 |         } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) { | 
 |           if (auto *CInt = dyn_cast<ConstantInt>(Cst)) { | 
 |             Known = KnownBits::makeConstant(CInt->getValue()); | 
 |           } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) { | 
 |             Known = | 
 |                 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt()); | 
 |           } | 
 |         } | 
 |       } | 
 |     } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) { | 
 |       // If this is a ZEXTLoad and we are looking at the loaded value. | 
 |       EVT VT = LD->getMemoryVT(); | 
 |       unsigned MemBits = VT.getScalarSizeInBits(); | 
 |       Known.Zero.setBitsFrom(MemBits); | 
 |     } else if (const MDNode *Ranges = LD->getRanges()) { | 
 |       EVT VT = LD->getValueType(0); | 
 |  | 
      // TODO: Handle extending loads.
 |       if (LD->getExtensionType() == ISD::NON_EXTLOAD) { | 
 |         if (VT.isVector()) { | 
 |           // Handle truncation to the first demanded element. | 
 |           // TODO: Figure out which demanded elements are covered | 
 |           if (DemandedElts != 1 || !getDataLayout().isLittleEndian()) | 
 |             break; | 
 |  | 
 |           // Handle the case where a load has a vector type, but scalar memory | 
 |           // with an attached range. | 
 |           EVT MemVT = LD->getMemoryVT(); | 
 |           KnownBits KnownFull(MemVT.getSizeInBits()); | 
 |  | 
 |           computeKnownBitsFromRangeMetadata(*Ranges, KnownFull); | 
 |           Known = KnownFull.trunc(BitWidth); | 
 |         } else | 
 |           computeKnownBitsFromRangeMetadata(*Ranges, Known); | 
 |       } | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::ZERO_EXTEND_VECTOR_INREG: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
 |     EVT InVT = Op.getOperand(0).getValueType(); | 
 |     APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements()); | 
 |     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); | 
 |     Known = Known.zext(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::ZERO_EXTEND: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = Known.zext(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::SIGN_EXTEND_VECTOR_INREG: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
 |     EVT InVT = Op.getOperand(0).getValueType(); | 
 |     APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements()); | 
 |     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); | 
 |     // If the sign bit is known to be zero or one, then sext will extend | 
 |     // it to the top bits, else it will just zext. | 
 |     Known = Known.sext(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::SIGN_EXTEND: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     // If the sign bit is known to be zero or one, then sext will extend | 
 |     // it to the top bits, else it will just zext. | 
 |     Known = Known.sext(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::ANY_EXTEND_VECTOR_INREG: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
 |     EVT InVT = Op.getOperand(0).getValueType(); | 
 |     APInt InDemandedElts = DemandedElts.zext(InVT.getVectorNumElements()); | 
 |     Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1); | 
 |     Known = Known.anyext(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::ANY_EXTEND: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = Known.anyext(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::TRUNCATE: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = Known.trunc(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::AssertZext: { | 
 |     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); | 
 |     APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits()); | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known.Zero |= (~InMask); | 
 |     Known.One  &= (~Known.Zero); | 
 |     break; | 
 |   } | 
 |   case ISD::AssertAlign: { | 
 |     unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign()); | 
 |     assert(LogOfAlign != 0); | 
 |  | 
 |     // TODO: Should use maximum with source | 
 |     // If a node is guaranteed to be aligned, set low zero bits accordingly as | 
 |     // well as clearing one bits. | 
 |     Known.Zero.setLowBits(LogOfAlign); | 
 |     Known.One.clearLowBits(LogOfAlign); | 
 |     break; | 
 |   } | 
 |   case ISD::FGETSIGN: | 
 |     // All bits are zero except the low bit. | 
 |     Known.Zero.setBitsFrom(1); | 
 |     break; | 
 |   case ISD::ADD: | 
 |   case ISD::SUB: { | 
 |     SDNodeFlags Flags = Op.getNode()->getFlags(); | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::computeForAddSub(Op.getOpcode() == ISD::ADD, | 
 |                                         Flags.hasNoSignedWrap(), Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::USUBO: | 
 |   case ISD::SSUBO: | 
 |   case ISD::USUBO_CARRY: | 
 |   case ISD::SSUBO_CARRY: | 
 |     if (Op.getResNo() == 1) { | 
 |       // If we know the result of a setcc has the top bits zero, use this info. | 
 |       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == | 
 |               TargetLowering::ZeroOrOneBooleanContent && | 
 |           BitWidth > 1) | 
 |         Known.Zero.setBitsFrom(1); | 
 |       break; | 
 |     } | 
 |     [[fallthrough]]; | 
 |   case ISD::SUBC: { | 
 |     assert(Op.getResNo() == 0 && | 
 |            "We only compute knownbits for the difference here."); | 
 |  | 
 |     // With USUBO_CARRY and SSUBO_CARRY a borrow bit may be added in. | 
 |     KnownBits Borrow(1); | 
 |     if (Opcode == ISD::USUBO_CARRY || Opcode == ISD::SSUBO_CARRY) { | 
 |       Borrow = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); | 
 |       // Borrow has bit width 1 | 
 |       Borrow = Borrow.trunc(1); | 
 |     } else { | 
 |       Borrow.setAllZero(); | 
 |     } | 
 |  | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::computeForSubBorrow(Known, Known2, Borrow); | 
 |     break; | 
 |   } | 
 |   case ISD::UADDO: | 
 |   case ISD::SADDO: | 
 |   case ISD::UADDO_CARRY: | 
 |   case ISD::SADDO_CARRY: | 
 |     if (Op.getResNo() == 1) { | 
 |       // If we know the result of a setcc has the top bits zero, use this info. | 
 |       if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) == | 
 |               TargetLowering::ZeroOrOneBooleanContent && | 
 |           BitWidth > 1) | 
 |         Known.Zero.setBitsFrom(1); | 
 |       break; | 
 |     } | 
 |     [[fallthrough]]; | 
 |   case ISD::ADDC: | 
 |   case ISD::ADDE: { | 
 |     assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here."); | 
 |  | 
 |     // With ADDE and UADDO_CARRY, a carry bit may be added in. | 
 |     KnownBits Carry(1); | 
 |     if (Opcode == ISD::ADDE) | 
 |       // Can't track carry from glue, set carry to unknown. | 
 |       Carry.resetAll(); | 
 |     else if (Opcode == ISD::UADDO_CARRY || Opcode == ISD::SADDO_CARRY) { | 
 |       Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1); | 
 |       // Carry has bit width 1 | 
 |       Carry = Carry.trunc(1); | 
 |     } else { | 
 |       Carry.setAllZero(); | 
 |     } | 
 |  | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::computeForAddCarry(Known, Known2, Carry); | 
 |     break; | 
 |   } | 
 |   case ISD::UDIV: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::udiv(Known, Known2, Op->getFlags().hasExact()); | 
 |     break; | 
 |   } | 
 |   case ISD::SDIV: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::sdiv(Known, Known2, Op->getFlags().hasExact()); | 
 |     break; | 
 |   } | 
 |   case ISD::SREM: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::srem(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::UREM: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::urem(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::EXTRACT_ELEMENT: { | 
 |     Known = computeKnownBits(Op.getOperand(0), Depth+1); | 
 |     const unsigned Index = Op.getConstantOperandVal(1); | 
 |     const unsigned EltBitWidth = Op.getValueSizeInBits(); | 
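    // e.g. (illustrative) extracting i32 element 1 of an i64 selects bits
    // [32, 64) of the wider value.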
 |  | 
 |     // Remove low part of known bits mask | 
 |     Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth); | 
 |     Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth); | 
 |  | 
 |     // Remove high part of known bit mask | 
 |     Known = Known.trunc(EltBitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::EXTRACT_VECTOR_ELT: { | 
 |     SDValue InVec = Op.getOperand(0); | 
 |     SDValue EltNo = Op.getOperand(1); | 
 |     EVT VecVT = InVec.getValueType(); | 
 |     // computeKnownBits not yet implemented for scalable vectors. | 
 |     if (VecVT.isScalableVector()) | 
 |       break; | 
 |     const unsigned EltBitWidth = VecVT.getScalarSizeInBits(); | 
 |     const unsigned NumSrcElts = VecVT.getVectorNumElements(); | 
 |  | 
    // If BitWidth > EltBitWidth the value is any-extended, so we do not know
    // anything about the extended bits.
 |     if (BitWidth > EltBitWidth) | 
 |       Known = Known.trunc(EltBitWidth); | 
 |  | 
 |     // If we know the element index, just demand that vector element, else for | 
 |     // an unknown element index, ignore DemandedElts and demand them all. | 
 |     APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts); | 
 |     auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); | 
 |     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) | 
 |       DemandedSrcElts = | 
 |           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); | 
 |  | 
 |     Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1); | 
 |     if (BitWidth > EltBitWidth) | 
 |       Known = Known.anyext(BitWidth); | 
 |     break; | 
 |   } | 
 |   case ISD::INSERT_VECTOR_ELT: { | 
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
 |  | 
 |     // If we know the element index, split the demand between the | 
 |     // source vector and the inserted element, otherwise assume we need | 
 |     // the original demanded vector elements and the value. | 
 |     SDValue InVec = Op.getOperand(0); | 
 |     SDValue InVal = Op.getOperand(1); | 
 |     SDValue EltNo = Op.getOperand(2); | 
 |     bool DemandedVal = true; | 
 |     APInt DemandedVecElts = DemandedElts; | 
 |     auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); | 
 |     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { | 
 |       unsigned EltIdx = CEltNo->getZExtValue(); | 
 |       DemandedVal = !!DemandedElts[EltIdx]; | 
 |       DemandedVecElts.clearBit(EltIdx); | 
 |     } | 
 |     Known.One.setAllBits(); | 
 |     Known.Zero.setAllBits(); | 
 |     if (DemandedVal) { | 
 |       Known2 = computeKnownBits(InVal, Depth + 1); | 
 |       Known = Known.intersectWith(Known2.zextOrTrunc(BitWidth)); | 
 |     } | 
 |     if (!!DemandedVecElts) { | 
 |       Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1); | 
 |       Known = Known.intersectWith(Known2); | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::BITREVERSE: { | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = Known2.reverseBits(); | 
 |     break; | 
 |   } | 
 |   case ISD::BSWAP: { | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = Known2.byteSwap(); | 
 |     break; | 
 |   } | 
 |   case ISD::ABS: { | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known = Known2.abs(); | 
 |     break; | 
 |   } | 
 |   case ISD::USUBSAT: { | 
 |     // The result of usubsat will never be larger than the LHS. | 
 |     Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known.Zero.setHighBits(Known2.countMinLeadingZeros()); | 
 |     break; | 
 |   } | 
 |   case ISD::UMIN: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::umin(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::UMAX: { | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     Known = KnownBits::umax(Known, Known2); | 
 |     break; | 
 |   } | 
 |   case ISD::SMIN: | 
 |   case ISD::SMAX: { | 
    // If we have a clamp pattern, we know that the number of sign bits will
    // be at least the minimum number of sign bits of the clamp constants.
 |     bool IsMax = (Opcode == ISD::SMAX); | 
 |     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; | 
 |     if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) | 
 |       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) | 
 |         CstHigh = | 
 |             isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); | 
 |     if (CstLow && CstHigh) { | 
 |       if (!IsMax) | 
 |         std::swap(CstLow, CstHigh); | 
 |  | 
 |       const APInt &ValueLow = CstLow->getAPIntValue(); | 
 |       const APInt &ValueHigh = CstHigh->getAPIntValue(); | 
 |       if (ValueLow.sle(ValueHigh)) { | 
 |         unsigned LowSignBits = ValueLow.getNumSignBits(); | 
 |         unsigned HighSignBits = ValueHigh.getNumSignBits(); | 
 |         unsigned MinSignBits = std::min(LowSignBits, HighSignBits); | 
 |         if (ValueLow.isNegative() && ValueHigh.isNegative()) { | 
 |           Known.One.setHighBits(MinSignBits); | 
 |           break; | 
 |         } | 
 |         if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) { | 
 |           Known.Zero.setHighBits(MinSignBits); | 
 |           break; | 
 |         } | 
 |       } | 
 |     } | 
 |  | 
 |     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     if (IsMax) | 
 |       Known = KnownBits::smax(Known, Known2); | 
 |     else | 
 |       Known = KnownBits::smin(Known, Known2); | 
 |  | 
 |     // For SMAX, if CstLow is non-negative we know the result will be | 
 |     // non-negative and thus all sign bits are 0. | 
 |     // TODO: There's an equivalent of this for smin with negative constant for | 
 |     // known ones. | 
 |     if (IsMax && CstLow) { | 
 |       const APInt &ValueLow = CstLow->getAPIntValue(); | 
 |       if (ValueLow.isNonNegative()) { | 
 |         unsigned SignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); | 
 |         Known.Zero.setHighBits(std::min(SignBits, ValueLow.getNumSignBits())); | 
 |       } | 
 |     } | 
 |  | 
 |     break; | 
 |   } | 
 |   case ISD::FP_TO_UINT_SAT: { | 
 |     // FP_TO_UINT_SAT produces an unsigned value that fits in the saturating VT. | 
 |     EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); | 
 |     Known.Zero |= APInt::getBitsSetFrom(BitWidth, VT.getScalarSizeInBits()); | 
 |     break; | 
 |   } | 
 |   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: | 
 |     if (Op.getResNo() == 1) { | 
 |       // The boolean result conforms to getBooleanContents. | 
 |       // If we know the result of a setcc has the top bits zero, use this info. | 
      // We know that we have an integer-based boolean since these operations
      // are only available for integers.
 |       if (TLI->getBooleanContents(Op.getValueType().isVector(), false) == | 
 |               TargetLowering::ZeroOrOneBooleanContent && | 
 |           BitWidth > 1) | 
 |         Known.Zero.setBitsFrom(1); | 
 |       break; | 
 |     } | 
 |     [[fallthrough]]; | 
 |   case ISD::ATOMIC_CMP_SWAP: | 
 |   case ISD::ATOMIC_SWAP: | 
 |   case ISD::ATOMIC_LOAD_ADD: | 
 |   case ISD::ATOMIC_LOAD_SUB: | 
 |   case ISD::ATOMIC_LOAD_AND: | 
 |   case ISD::ATOMIC_LOAD_CLR: | 
 |   case ISD::ATOMIC_LOAD_OR: | 
 |   case ISD::ATOMIC_LOAD_XOR: | 
 |   case ISD::ATOMIC_LOAD_NAND: | 
 |   case ISD::ATOMIC_LOAD_MIN: | 
 |   case ISD::ATOMIC_LOAD_MAX: | 
 |   case ISD::ATOMIC_LOAD_UMIN: | 
 |   case ISD::ATOMIC_LOAD_UMAX: | 
 |   case ISD::ATOMIC_LOAD: { | 
 |     unsigned MemBits = | 
 |         cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits(); | 
 |     // If we are looking at the loaded value. | 
 |     if (Op.getResNo() == 0) { | 
 |       if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND) | 
 |         Known.Zero.setBitsFrom(MemBits); | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::FrameIndex: | 
 |   case ISD::TargetFrameIndex: | 
 |     TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(), | 
 |                                        Known, getMachineFunction()); | 
 |     break; | 
 |  | 
 |   default: | 
 |     if (Opcode < ISD::BUILTIN_OP_END) | 
 |       break; | 
 |     [[fallthrough]]; | 
 |   case ISD::INTRINSIC_WO_CHAIN: | 
 |   case ISD::INTRINSIC_W_CHAIN: | 
 |   case ISD::INTRINSIC_VOID: | 
    // TODO: Probably okay to remove after audit; here to reduce change size
    // in initial enablement patch for scalable vectors.
 |     if (Op.getValueType().isScalableVector()) | 
 |       break; | 
 |  | 
 |     // Allow the target to implement this method for its nodes. | 
 |     TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth); | 
 |     break; | 
 |   } | 
 |  | 
 |   assert(!Known.hasConflict() && "Bits known to be one AND zero?"); | 
 |   return Known; | 
 | } | 
 |  | 
 | /// Convert ConstantRange OverflowResult into SelectionDAG::OverflowKind. | 
static SelectionDAG::OverflowKind
mapOverflowResult(ConstantRange::OverflowResult OR) {
 |   switch (OR) { | 
 |   case ConstantRange::OverflowResult::MayOverflow: | 
 |     return SelectionDAG::OFK_Sometime; | 
 |   case ConstantRange::OverflowResult::AlwaysOverflowsLow: | 
 |   case ConstantRange::OverflowResult::AlwaysOverflowsHigh: | 
 |     return SelectionDAG::OFK_Always; | 
 |   case ConstantRange::OverflowResult::NeverOverflows: | 
 |     return SelectionDAG::OFK_Never; | 
 |   } | 
 |   llvm_unreachable("Unknown OverflowResult"); | 
 | } | 
 |  | 
 | SelectionDAG::OverflowKind | 
 | SelectionDAG::computeOverflowForSignedAdd(SDValue N0, SDValue N1) const { | 
  // X + 0 never overflows.
 |   if (isNullConstant(N1)) | 
 |     return OFK_Never; | 
 |  | 
  // If each operand has at least two sign bits, the addition cannot overflow.
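  // (Each value then lies in [-2^(BitWidth-2), 2^(BitWidth-2) - 1], so the
  // sum lies in [-2^(BitWidth-1), 2^(BitWidth-1) - 2] and is representable.)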
 |   if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1) | 
 |     return OFK_Never; | 
 |  | 
 |   // TODO: Add ConstantRange::signedAddMayOverflow handling. | 
 |   return OFK_Sometime; | 
 | } | 
 |  | 
 | SelectionDAG::OverflowKind | 
 | SelectionDAG::computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const { | 
  // X + 0 never overflows.
 |   if (isNullConstant(N1)) | 
 |     return OFK_Never; | 
 |  | 
  // The high result of a UMUL_LOHI is at most 2^BitWidth - 2, so adding a
  // value known to be 0 or 1 to it never overflows.
 |   KnownBits N1Known = computeKnownBits(N1); | 
 |   if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 && | 
 |       N1Known.getMaxValue().ult(2)) | 
 |     return OFK_Never; | 
 |  | 
 |   KnownBits N0Known = computeKnownBits(N0); | 
 |   if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1 && | 
 |       N0Known.getMaxValue().ult(2)) | 
 |     return OFK_Never; | 
 |  | 
 |   // Fallback to ConstantRange::unsignedAddMayOverflow handling. | 
 |   ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false); | 
 |   ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false); | 
 |   return mapOverflowResult(N0Range.unsignedAddMayOverflow(N1Range)); | 
 | } | 
 |  | 
 | SelectionDAG::OverflowKind | 
 | SelectionDAG::computeOverflowForSignedSub(SDValue N0, SDValue N1) const { | 
  // X - 0 never overflows.
 |   if (isNullConstant(N1)) | 
 |     return OFK_Never; | 
 |  | 
  // If each operand has at least two sign bits, the subtraction cannot
  // overflow.
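  // (The difference then lies in [-2^(BitWidth-1) + 1, 2^(BitWidth-1) - 1],
  // which is representable.)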
 |   if (ComputeNumSignBits(N0) > 1 && ComputeNumSignBits(N1) > 1) | 
 |     return OFK_Never; | 
 |  | 
 |   KnownBits N0Known = computeKnownBits(N0); | 
 |   KnownBits N1Known = computeKnownBits(N1); | 
 |   ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, true); | 
 |   ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, true); | 
 |   return mapOverflowResult(N0Range.signedSubMayOverflow(N1Range)); | 
 | } | 
 |  | 
 | SelectionDAG::OverflowKind | 
 | SelectionDAG::computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const { | 
  // X - 0 never overflows.
 |   if (isNullConstant(N1)) | 
 |     return OFK_Never; | 
 |  | 
 |   KnownBits N0Known = computeKnownBits(N0); | 
 |   KnownBits N1Known = computeKnownBits(N1); | 
 |   ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false); | 
 |   ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false); | 
 |   return mapOverflowResult(N0Range.unsignedSubMayOverflow(N1Range)); | 
 | } | 
 |  | 
 | SelectionDAG::OverflowKind | 
 | SelectionDAG::computeOverflowForUnsignedMul(SDValue N0, SDValue N1) const { | 
 |   // X * 0 and X * 1 never overflow. | 
 |   if (isNullConstant(N1) || isOneConstant(N1)) | 
 |     return OFK_Never; | 
 |  | 
 |   KnownBits N0Known = computeKnownBits(N0); | 
 |   KnownBits N1Known = computeKnownBits(N1); | 
 |   ConstantRange N0Range = ConstantRange::fromKnownBits(N0Known, false); | 
 |   ConstantRange N1Range = ConstantRange::fromKnownBits(N1Known, false); | 
 |   return mapOverflowResult(N0Range.unsignedMulMayOverflow(N1Range)); | 
 | } | 
 |  | 
 | SelectionDAG::OverflowKind | 
 | SelectionDAG::computeOverflowForSignedMul(SDValue N0, SDValue N1) const { | 
 |   // X * 0 and X * 1 never overflow. | 
 |   if (isNullConstant(N1) || isOneConstant(N1)) | 
 |     return OFK_Never; | 
 |  | 
 |   // Get the size of the result. | 
 |   unsigned BitWidth = N0.getScalarValueSizeInBits(); | 
 |  | 
 |   // Sum of the sign bits. | 
 |   unsigned SignBits = ComputeNumSignBits(N0) + ComputeNumSignBits(N1); | 
 |  | 
 |   // If we have enough sign bits, then there's no overflow. | 
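  // For example, multiplying two i16 values that each fit in i8 gives at
  // least 9 sign bits per operand (18 > 17), and the product magnitude is at
  // most 128 * 128 = 0x4000, which fits in i16.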
 |   if (SignBits > BitWidth + 1) | 
 |     return OFK_Never; | 
 |  | 
 |   if (SignBits == BitWidth + 1) { | 
    // The overflow occurs when the true multiplication of the
    // operands is the minimum negative number.
 |     KnownBits N0Known = computeKnownBits(N0); | 
 |     KnownBits N1Known = computeKnownBits(N1); | 
 |     // If one of the operands is non-negative, then there's no | 
 |     // overflow. | 
 |     if (N0Known.isNonNegative() || N1Known.isNonNegative()) | 
 |       return OFK_Never; | 
 |   } | 
 |  | 
 |   return OFK_Sometime; | 
 | } | 
 |  | 
 | bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth) const { | 
 |   if (Depth >= MaxRecursionDepth) | 
 |     return false; // Limit search depth. | 
 |  | 
 |   EVT OpVT = Val.getValueType(); | 
 |   unsigned BitWidth = OpVT.getScalarSizeInBits(); | 
 |  | 
 |   // Is the constant a known power of 2? | 
 |   if (ISD::matchUnaryPredicate(Val, [BitWidth](ConstantSDNode *C) { | 
 |         return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); | 
 |       })) | 
 |     return true; | 
 |  | 
 |   // A left-shift of a constant one will have exactly one bit set because | 
 |   // shifting the bit off the end is undefined. | 
 |   if (Val.getOpcode() == ISD::SHL) { | 
 |     auto *C = isConstOrConstSplat(Val.getOperand(0)); | 
 |     if (C && C->getAPIntValue() == 1) | 
 |       return true; | 
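    // Otherwise, shifting a power of two left keeps at most one bit set, so
    // the result remains a power of two as long as it is known non-zero.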
 |     return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1) && | 
 |            isKnownNeverZero(Val, Depth); | 
 |   } | 
 |  | 
 |   // Similarly, a logical right-shift of a constant sign-bit will have exactly | 
 |   // one bit set. | 
 |   if (Val.getOpcode() == ISD::SRL) { | 
 |     auto *C = isConstOrConstSplat(Val.getOperand(0)); | 
 |     if (C && C->getAPIntValue().isSignMask()) | 
 |       return true; | 
 |     return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1) && | 
 |            isKnownNeverZero(Val, Depth); | 
 |   } | 
 |  | 
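  // Rotates preserve the number of set bits, so rotating a power of two
  // yields a power of two.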
 |   if (Val.getOpcode() == ISD::ROTL || Val.getOpcode() == ISD::ROTR) | 
 |     return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1); | 
 |  | 
 |   // Are all operands of a build vector constant powers of two? | 
 |   if (Val.getOpcode() == ISD::BUILD_VECTOR) | 
 |     if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) { | 
 |           if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E)) | 
 |             return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2(); | 
 |           return false; | 
 |         })) | 
 |       return true; | 
 |  | 
 |   // Is the operand of a splat vector a constant power of two? | 
 |   if (Val.getOpcode() == ISD::SPLAT_VECTOR) | 
 |     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val->getOperand(0))) | 
 |       if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2()) | 
 |         return true; | 
 |  | 
  // vscale(power-of-two) is a power-of-two for some targets.
 |   if (Val.getOpcode() == ISD::VSCALE && | 
 |       getTargetLoweringInfo().isVScaleKnownToBeAPowerOfTwo() && | 
 |       isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1)) | 
 |     return true; | 
 |  | 
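  // A min/max (and likewise a select, below) returns one of its operands, so
  // if both candidates are powers of two the result is as well.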
 |   if (Val.getOpcode() == ISD::SMIN || Val.getOpcode() == ISD::SMAX || | 
 |       Val.getOpcode() == ISD::UMIN || Val.getOpcode() == ISD::UMAX) | 
 |     return isKnownToBeAPowerOfTwo(Val.getOperand(1), Depth + 1) && | 
 |            isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1); | 
 |  | 
 |   if (Val.getOpcode() == ISD::SELECT || Val.getOpcode() == ISD::VSELECT) | 
 |     return isKnownToBeAPowerOfTwo(Val.getOperand(2), Depth + 1) && | 
 |            isKnownToBeAPowerOfTwo(Val.getOperand(1), Depth + 1); | 
 |  | 
 |   if (Val.getOpcode() == ISD::AND) { | 
 |     // Looking for `x & -x` pattern: | 
 |     // If x == 0: | 
 |     //    x & -x -> 0 | 
 |     // If x != 0: | 
 |     //    x & -x -> non-zero pow2 | 
 |     // so if we find the pattern return whether we know `x` is non-zero. | 
 |     for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx) { | 
 |       SDValue NegOp = Val.getOperand(OpIdx); | 
 |       if (NegOp.getOpcode() == ISD::SUB && | 
 |           NegOp.getOperand(1) == Val.getOperand(1 - OpIdx) && | 
 |           isNullOrNullSplat(NegOp.getOperand(0))) | 
 |         return isKnownNeverZero(Val.getOperand(1 - OpIdx), Depth); | 
 |     } | 
 |   } | 
 |  | 
 |   if (Val.getOpcode() == ISD::ZERO_EXTEND) | 
 |     return isKnownToBeAPowerOfTwo(Val.getOperand(0), Depth + 1); | 
 |  | 
 |   // More could be done here, though the above checks are enough | 
 |   // to handle some common cases. | 
 |   return false; | 
 | } | 
 |  | 
 | unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const { | 
 |   EVT VT = Op.getValueType(); | 
 |  | 
 |   // Since the number of lanes in a scalable vector is unknown at compile time, | 
 |   // we track one bit which is implicitly broadcast to all lanes.  This means | 
 |   // that all lanes in a scalable vector are considered demanded. | 
 |   APInt DemandedElts = VT.isFixedLengthVector() | 
 |                            ? APInt::getAllOnes(VT.getVectorNumElements()) | 
 |                            : APInt(1, 1); | 
 |   return ComputeNumSignBits(Op, DemandedElts, Depth); | 
 | } | 
 |  | 
 | unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts, | 
 |                                           unsigned Depth) const { | 
 |   EVT VT = Op.getValueType(); | 
 |   assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!"); | 
 |   unsigned VTBits = VT.getScalarSizeInBits(); | 
 |   unsigned NumElts = DemandedElts.getBitWidth(); | 
 |   unsigned Tmp, Tmp2; | 
 |   unsigned FirstAnswer = 1; | 
 |  | 
 |   if (auto *C = dyn_cast<ConstantSDNode>(Op)) { | 
 |     const APInt &Val = C->getAPIntValue(); | 
 |     return Val.getNumSignBits(); | 
 |   } | 
 |  | 
 |   if (Depth >= MaxRecursionDepth) | 
 |     return 1;  // Limit search depth. | 
 |  | 
 |   if (!DemandedElts) | 
 |     return 1;  // No demanded elts, better to assume we don't know anything. | 
 |  | 
 |   unsigned Opcode = Op.getOpcode(); | 
 |   switch (Opcode) { | 
 |   default: break; | 
 |   case ISD::AssertSext: | 
 |     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); | 
 |     return VTBits-Tmp+1; | 
 |   case ISD::AssertZext: | 
 |     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits(); | 
 |     return VTBits-Tmp; | 
 |   case ISD::MERGE_VALUES: | 
 |     return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts, | 
 |                               Depth + 1); | 
 |   case ISD::SPLAT_VECTOR: { | 
    // The splatted scalar operand may be implicitly truncated; check if its
    // sign bits extend down as far as the truncated element value.
 |     unsigned NumSrcBits = Op.getOperand(0).getValueSizeInBits(); | 
 |     unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); | 
 |     if (NumSrcSignBits > (NumSrcBits - VTBits)) | 
 |       return NumSrcSignBits - (NumSrcBits - VTBits); | 
 |     break; | 
 |   } | 
 |   case ISD::BUILD_VECTOR: | 
 |     assert(!VT.isScalableVector()); | 
 |     Tmp = VTBits; | 
 |     for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) { | 
 |       if (!DemandedElts[i]) | 
 |         continue; | 
 |  | 
 |       SDValue SrcOp = Op.getOperand(i); | 
 |       // BUILD_VECTOR can implicitly truncate sources, we handle this specially | 
 |       // for constant nodes to ensure we only look at the sign bits. | 
 |       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(SrcOp)) { | 
 |         APInt T = C->getAPIntValue().trunc(VTBits); | 
 |         Tmp2 = T.getNumSignBits(); | 
 |       } else { | 
 |         Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1); | 
 |  | 
 |         if (SrcOp.getValueSizeInBits() != VTBits) { | 
 |           assert(SrcOp.getValueSizeInBits() > VTBits && | 
 |                  "Expected BUILD_VECTOR implicit truncation"); | 
 |           unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits; | 
 |           Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1); | 
 |         } | 
 |       } | 
 |       Tmp = std::min(Tmp, Tmp2); | 
 |     } | 
 |     return Tmp; | 
 |  | 
 |   case ISD::VECTOR_SHUFFLE: { | 
 |     // Collect the minimum number of sign bits that are shared by every vector | 
 |     // element referenced by the shuffle. | 
 |     APInt DemandedLHS, DemandedRHS; | 
 |     const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op); | 
 |     assert(NumElts == SVN->getMask().size() && "Unexpected vector size"); | 
 |     if (!getShuffleDemandedElts(NumElts, SVN->getMask(), DemandedElts, | 
 |                                 DemandedLHS, DemandedRHS)) | 
 |       return 1; | 
 |  | 
 |     Tmp = std::numeric_limits<unsigned>::max(); | 
 |     if (!!DemandedLHS) | 
 |       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); | 
 |     if (!!DemandedRHS) { | 
 |       Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); | 
 |       Tmp = std::min(Tmp, Tmp2); | 
 |     } | 
 |     // If we don't know anything, early out and try computeKnownBits fall-back. | 
 |     if (Tmp == 1) | 
 |       break; | 
 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); | 
 |     return Tmp; | 
 |   } | 
 |  | 
 |   case ISD::BITCAST: { | 
 |     if (VT.isScalableVector()) | 
 |       break; | 
 |     SDValue N0 = Op.getOperand(0); | 
 |     EVT SrcVT = N0.getValueType(); | 
 |     unsigned SrcBits = SrcVT.getScalarSizeInBits(); | 
 |  | 
    // Ignore bitcasts from unsupported types.
 |     if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint())) | 
 |       break; | 
 |  | 
 |     // Fast handling of 'identity' bitcasts. | 
 |     if (VTBits == SrcBits) | 
 |       return ComputeNumSignBits(N0, DemandedElts, Depth + 1); | 
 |  | 
 |     bool IsLE = getDataLayout().isLittleEndian(); | 
 |  | 
 |     // Bitcast 'large element' scalar/vector to 'small element' vector. | 
 |     if ((SrcBits % VTBits) == 0) { | 
 |       assert(VT.isVector() && "Expected bitcast to vector"); | 
 |  | 
 |       unsigned Scale = SrcBits / VTBits; | 
 |       APInt SrcDemandedElts = | 
 |           APIntOps::ScaleBitMask(DemandedElts, NumElts / Scale); | 
 |  | 
 |       // Fast case - sign splat can be simply split across the small elements. | 
 |       Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1); | 
 |       if (Tmp == SrcBits) | 
 |         return VTBits; | 
 |  | 
 |       // Slow case - determine how far the sign extends into each sub-element. | 
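      // e.g. for a little-endian v2i64 -> v4i32 bitcast, the odd sub-elements
      // hold the high halves (SubOffset == 0), while the even sub-elements
      // only have known sign bits once the source sign extends past bit 31
      // (Tmp > 32).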
 |       Tmp2 = VTBits; | 
 |       for (unsigned i = 0; i != NumElts; ++i) | 
 |         if (DemandedElts[i]) { | 
 |           unsigned SubOffset = i % Scale; | 
 |           SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset); | 
 |           SubOffset = SubOffset * VTBits; | 
 |           if (Tmp <= SubOffset) | 
 |             return 1; | 
 |           Tmp2 = std::min(Tmp2, Tmp - SubOffset); | 
 |         } | 
 |       return Tmp2; | 
 |     } | 
 |     break; | 
 |   } | 
 |  | 
 |   case ISD::FP_TO_SINT_SAT: | 
 |     // FP_TO_SINT_SAT produces a signed value that fits in the saturating VT. | 
 |     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); | 
 |     return VTBits - Tmp + 1; | 
 |   case ISD::SIGN_EXTEND: | 
 |     Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits(); | 
 |     return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp; | 
 |   case ISD::SIGN_EXTEND_INREG: | 
 |     // Max of the input and what this extends. | 
 |     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits(); | 
 |     Tmp = VTBits-Tmp+1; | 
 |     Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); | 
 |     return std::max(Tmp, Tmp2); | 
 |   case ISD::SIGN_EXTEND_VECTOR_INREG: { | 
 |     if (VT.isScalableVector()) | 
 |       break; | 
 |     SDValue Src = Op.getOperand(0); | 
 |     EVT SrcVT = Src.getValueType(); | 
 |     APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements()); | 
 |     Tmp = VTBits - SrcVT.getScalarSizeInBits(); | 
 |     return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp; | 
 |   } | 
 |   case ISD::SRA: | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     // SRA X, C -> adds C sign bits. | 
 |     if (const APInt *ShAmt = | 
 |             getValidMinimumShiftAmountConstant(Op, DemandedElts)) | 
 |       Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits); | 
 |     return Tmp; | 
 |   case ISD::SHL: | 
 |     if (const APInt *ShAmt = | 
 |             getValidMaximumShiftAmountConstant(Op, DemandedElts)) { | 
      // shl destroys sign bits; ensure it doesn't shift out all sign bits.
 |       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |       if (ShAmt->ult(Tmp)) | 
 |         return Tmp - ShAmt->getZExtValue(); | 
 |     } | 
 |     break; | 
 |   case ISD::AND: | 
 |   case ISD::OR: | 
 |   case ISD::XOR:    // NOT is handled here. | 
 |     // Logical binary ops preserve the number of sign bits at the worst. | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1); | 
 |     if (Tmp != 1) { | 
 |       Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); | 
 |       FirstAnswer = std::min(Tmp, Tmp2); | 
 |       // We computed what we know about the sign bits as our first | 
 |       // answer. Now proceed to the generic code that uses | 
 |       // computeKnownBits, and pick whichever answer is better. | 
 |     } | 
 |     break; | 
 |  | 
 |   case ISD::SELECT: | 
 |   case ISD::VSELECT: | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1); | 
 |     if (Tmp == 1) return 1;  // Early out. | 
 |     Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); | 
 |     return std::min(Tmp, Tmp2); | 
 |   case ISD::SELECT_CC: | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1); | 
 |     if (Tmp == 1) return 1;  // Early out. | 
 |     Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1); | 
 |     return std::min(Tmp, Tmp2); | 
 |  | 
 |   case ISD::SMIN: | 
 |   case ISD::SMAX: { | 
 |     // If we have a clamp pattern, we know that the number of sign bits will be | 
 |     // the minimum of the clamp min/max range. | 
 |     bool IsMax = (Opcode == ISD::SMAX); | 
 |     ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr; | 
 |     if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts))) | 
 |       if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX)) | 
 |         CstHigh = | 
 |             isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts); | 
 |     if (CstLow && CstHigh) { | 
 |       if (!IsMax) | 
 |         std::swap(CstLow, CstHigh); | 
 |       if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) { | 
 |         Tmp = CstLow->getAPIntValue().getNumSignBits(); | 
 |         Tmp2 = CstHigh->getAPIntValue().getNumSignBits(); | 
 |         return std::min(Tmp, Tmp2); | 
 |       } | 
 |     } | 
 |  | 
 |     // Fallback - just get the minimum number of sign bits of the operands. | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     if (Tmp == 1) | 
 |       return 1;  // Early out. | 
 |     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     return std::min(Tmp, Tmp2); | 
 |   } | 
 |   case ISD::UMIN: | 
 |   case ISD::UMAX: | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     if (Tmp == 1) | 
 |       return 1;  // Early out. | 
 |     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     return std::min(Tmp, Tmp2); | 
 |   case ISD::SADDO: | 
 |   case ISD::UADDO: | 
 |   case ISD::SADDO_CARRY: | 
 |   case ISD::UADDO_CARRY: | 
 |   case ISD::SSUBO: | 
 |   case ISD::USUBO: | 
 |   case ISD::SSUBO_CARRY: | 
 |   case ISD::USUBO_CARRY: | 
 |   case ISD::SMULO: | 
 |   case ISD::UMULO: | 
 |     if (Op.getResNo() != 1) | 
 |       break; | 
    // The boolean result conforms to getBooleanContents.
 |     // If setcc returns 0/-1, all bits are sign bits. | 
 |     // We know that we have an integer-based boolean since these operations | 
    // are only available for integer types.
 |     if (TLI->getBooleanContents(VT.isVector(), false) == | 
 |         TargetLowering::ZeroOrNegativeOneBooleanContent) | 
 |       return VTBits; | 
 |     break; | 
 |   case ISD::SETCC: | 
 |   case ISD::SETCCCARRY: | 
 |   case ISD::STRICT_FSETCC: | 
 |   case ISD::STRICT_FSETCCS: { | 
 |     unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0; | 
 |     // If setcc returns 0/-1, all bits are sign bits. | 
 |     if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) == | 
 |         TargetLowering::ZeroOrNegativeOneBooleanContent) | 
 |       return VTBits; | 
 |     break; | 
 |   } | 
 |   case ISD::ROTL: | 
 |   case ISD::ROTR: | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |  | 
    // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
 |     if (Tmp == VTBits) | 
 |       return VTBits; | 
 |  | 
 |     if (ConstantSDNode *C = | 
 |             isConstOrConstSplat(Op.getOperand(1), DemandedElts)) { | 
 |       unsigned RotAmt = C->getAPIntValue().urem(VTBits); | 
 |  | 
      // Handle rotate right by N like a rotate left by VTBits-N.
 |       if (Opcode == ISD::ROTR) | 
 |         RotAmt = (VTBits - RotAmt) % VTBits; | 
 |  | 
 |       // If we aren't rotating out all of the known-in sign bits, return the | 
 |       // number that are left.  This handles rotl(sext(x), 1) for example. | 
 |       if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt); | 
 |     } | 
 |     break; | 
 |   case ISD::ADD: | 
 |   case ISD::ADDC: | 
 |     // Add can have at most one carry bit.  Thus we know that the output | 
 |     // is, at worst, one more bit than the inputs. | 
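    // e.g. if both i32 operands have at least 20 sign bits, the sum still has
    // at least 19.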
 |     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     if (Tmp == 1) return 1; // Early out. | 
 |  | 
 |     // Special case decrementing a value (ADD X, -1): | 
 |     if (ConstantSDNode *CRHS = | 
 |             isConstOrConstSplat(Op.getOperand(1), DemandedElts)) | 
 |       if (CRHS->isAllOnes()) { | 
 |         KnownBits Known = | 
 |             computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |  | 
 |         // If the input is known to be 0 or 1, the output is 0/-1, which is all | 
 |         // sign bits set. | 
 |         if ((Known.Zero | 1).isAllOnes()) | 
 |           return VTBits; | 
 |  | 
 |         // If we are subtracting one from a positive number, there is no carry | 
 |         // out of the result. | 
 |         if (Known.isNonNegative()) | 
 |           return Tmp; | 
 |       } | 
 |  | 
 |     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     if (Tmp2 == 1) return 1; // Early out. | 
 |     return std::min(Tmp, Tmp2) - 1; | 
 |   case ISD::SUB: | 
 |     Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |     if (Tmp2 == 1) return 1; // Early out. | 
 |  | 
 |     // Handle NEG. | 
 |     if (ConstantSDNode *CLHS = | 
 |             isConstOrConstSplat(Op.getOperand(0), DemandedElts)) | 
 |       if (CLHS->isZero()) { | 
 |         KnownBits Known = | 
 |             computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); | 
 |         // If the input is known to be 0 or 1, the output is 0/-1, which is all | 
 |         // sign bits set. | 
 |         if ((Known.Zero | 1).isAllOnes()) | 
 |           return VTBits; | 
 |  | 
 |         // If the input is known to be positive (the sign bit is known clear), | 
 |         // the output of the NEG has the same number of sign bits as the input. | 
 |         if (Known.isNonNegative()) | 
 |           return Tmp2; | 
 |  | 
 |         // Otherwise, we treat this like a SUB. | 
 |       } | 
 |  | 
 |     // Sub can have at most one carry bit.  Thus we know that the output | 
 |     // is, at worst, one more bit than the inputs. | 
 |     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |     if (Tmp == 1) return 1; // Early out. | 
 |     return std::min(Tmp, Tmp2) - 1; | 
 |   case ISD::MUL: { | 
 |     // The output of the Mul can be at most twice the valid bits in the inputs. | 
 |     unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1); | 
 |     if (SignBitsOp0 == 1) | 
 |       break; | 
 |     unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1); | 
 |     if (SignBitsOp1 == 1) | 
 |       break; | 
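    // e.g. two i32 operands with 20 sign bits each have 13 valid bits each;
    // the product needs at most 26 valid bits, leaving 32 - 26 + 1 == 7 known
    // sign bits.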
 |     unsigned OutValidBits = | 
 |         (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1); | 
 |     return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1; | 
 |   } | 
 |   case ISD::SREM: | 
 |     // The sign bit is the LHS's sign bit, except when the result of the | 
 |     // remainder is zero. The magnitude of the result should be less than or | 
 |     // equal to the magnitude of the LHS. Therefore, the result should have | 
 |     // at least as many sign bits as the left hand side. | 
 |     return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1); | 
 |   case ISD::TRUNCATE: { | 
 |     // Check if the sign bits of source go down as far as the truncated value. | 
 |     unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits(); | 
 |     unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1); | 
 |     if (NumSrcSignBits > (NumSrcBits - VTBits)) | 
 |       return NumSrcSignBits - (NumSrcBits - VTBits); | 
 |     break; | 
 |   } | 
 |   case ISD::EXTRACT_ELEMENT: { | 
 |     if (VT.isScalableVector()) | 
 |       break; | 
 |     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1); | 
 |     const int BitWidth = Op.getValueSizeInBits(); | 
 |     const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth; | 
 |  | 
    // Op1 indexes elements from the little end; convert it to an index
    // counted from the big end, where the sign bits start.
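    // e.g. extracting the high i32 of an i64 (Items == 2, operand 1 == 1)
    // gives rIndex == 0, so the result keeps min(KnownSign, 32) sign bits.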
 |     const int rIndex = Items - 1 - Op.getConstantOperandVal(1); | 
 |  | 
    // If the sign portion ends in our element, the subtraction gives the
    // correct result; otherwise it is either negative or greater than the bit
    // width, so clamp.
 |     return std::clamp(KnownSign - rIndex * BitWidth, 0, BitWidth); | 
 |   } | 
 |   case ISD::INSERT_VECTOR_ELT: { | 
 |     if (VT.isScalableVector()) | 
 |       break; | 
 |     // If we know the element index, split the demand between the | 
 |     // source vector and the inserted element, otherwise assume we need | 
 |     // the original demanded vector elements and the value. | 
 |     SDValue InVec = Op.getOperand(0); | 
 |     SDValue InVal = Op.getOperand(1); | 
 |     SDValue EltNo = Op.getOperand(2); | 
 |     bool DemandedVal = true; | 
 |     APInt DemandedVecElts = DemandedElts; | 
 |     auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo); | 
 |     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) { | 
 |       unsigned EltIdx = CEltNo->getZExtValue(); | 
 |       DemandedVal = !!DemandedElts[EltIdx]; | 
 |       DemandedVecElts.clearBit(EltIdx); | 
 |     } | 
 |     Tmp = std::numeric_limits<unsigned>::max(); | 
 |     if (DemandedVal) { | 
 |       // TODO - handle implicit truncation of inserted elements. | 
 |       if (InVal.getScalarValueSizeInBits() != VTBits) | 
 |         break; | 
 |       Tmp2 = ComputeNumSignBits(InVal, Depth + 1); | 
 |       Tmp = std::min(Tmp, Tmp2); | 
 |     } | 
 |     if (!!DemandedVecElts) { | 
 |       Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1); | 
 |       Tmp = std::min(Tmp, Tmp2); | 
 |     } | 
 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); | 
 |     return Tmp; | 
 |   } | 
 |   case ISD::EXTRACT_VECTOR_ELT: { | 
 |     assert(!VT.isScalableVector()); | 
 |     SDValue InVec = Op.getOperand(0); | 
 |     SDValue EltNo = Op.getOperand(1); | 
 |     EVT VecVT = InVec.getValueType(); | 
 |     // ComputeNumSignBits not yet implemented for scalable vectors. | 
 |     if (VecVT.isScalableVector()) | 
 |       break; | 
 |     const unsigned BitWidth = Op.getValueSizeInBits(); | 
 |     const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits(); | 
 |     const unsigned NumSrcElts = VecVT.getVectorNumElements(); | 
 |  | 
    // If BitWidth > EltBitWidth the value is implicitly any-extended, and we
    // know nothing about the sign bits. But if the sizes match we can derive
    // knowledge about the sign bits from the vector operand.
 |     if (BitWidth != EltBitWidth) | 
 |       break; | 
 |  | 
 |     // If we know the element index, just demand that vector element, else for | 
 |     // an unknown element index, ignore DemandedElts and demand them all. | 
 |     APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts); | 
 |     auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); | 
 |     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) | 
 |       DemandedSrcElts = | 
 |           APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue()); | 
 |  | 
 |     return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1); | 
 |   } | 
 |   case ISD::EXTRACT_SUBVECTOR: { | 
 |     // Offset the demanded elts by the subvector index. | 
 |     SDValue Src = Op.getOperand(0); | 
 |     // Bail until we can represent demanded elements for scalable vectors. | 
 |     if (Src.getValueType().isScalableVector()) | 
 |       break; | 
 |     uint64_t Idx = Op.getConstantOperandVal(1); | 
 |     unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); | 
 |     APInt DemandedSrcElts = DemandedElts.zext(NumSrcElts).shl(Idx); | 
 |     return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); | 
 |   } | 
 |   case ISD::CONCAT_VECTORS: { | 
 |     if (VT.isScalableVector()) | 
 |       break; | 
 |     // Determine the minimum number of sign bits across all demanded | 
 |     // elts of the input vectors. Early out if the result is already 1. | 
 |     Tmp = std::numeric_limits<unsigned>::max(); | 
 |     EVT SubVectorVT = Op.getOperand(0).getValueType(); | 
 |     unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements(); | 
 |     unsigned NumSubVectors = Op.getNumOperands(); | 
 |     for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) { | 
 |       APInt DemandedSub = | 
 |           DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts); | 
 |       if (!DemandedSub) | 
 |         continue; | 
 |       Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1); | 
 |       Tmp = std::min(Tmp, Tmp2); | 
 |     } | 
 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); | 
 |     return Tmp; | 
 |   } | 
 |   case ISD::INSERT_SUBVECTOR: { | 
 |     if (VT.isScalableVector()) | 
 |       break; | 
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
 |     SDValue Src = Op.getOperand(0); | 
 |     SDValue Sub = Op.getOperand(1); | 
 |     uint64_t Idx = Op.getConstantOperandVal(2); | 
 |     unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); | 
 |     APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); | 
 |     APInt DemandedSrcElts = DemandedElts; | 
 |     DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx); | 
 |  | 
 |     Tmp = std::numeric_limits<unsigned>::max(); | 
 |     if (!!DemandedSubElts) { | 
 |       Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1); | 
 |       if (Tmp == 1) | 
 |         return 1; // early-out | 
 |     } | 
 |     if (!!DemandedSrcElts) { | 
 |       Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1); | 
 |       Tmp = std::min(Tmp, Tmp2); | 
 |     } | 
 |     assert(Tmp <= VTBits && "Failed to determine minimum sign bits"); | 
 |     return Tmp; | 
 |   } | 
 |   case ISD::LOAD: { | 
 |     LoadSDNode *LD = cast<LoadSDNode>(Op); | 
 |     if (const MDNode *Ranges = LD->getRanges()) { | 
 |       if (DemandedElts != 1) | 
 |         break; | 
 |  | 
 |       ConstantRange CR = getConstantRangeFromMetadata(*Ranges); | 
 |       if (VTBits > CR.getBitWidth()) { | 
 |         switch (LD->getExtensionType()) { | 
 |         case ISD::SEXTLOAD: | 
 |           CR = CR.signExtend(VTBits); | 
 |           break; | 
 |         case ISD::ZEXTLOAD: | 
 |           CR = CR.zeroExtend(VTBits); | 
 |           break; | 
 |         default: | 
 |           break; | 
 |         } | 
 |       } | 
 |  | 
 |       if (VTBits != CR.getBitWidth()) | 
 |         break; | 
 |       return std::min(CR.getSignedMin().getNumSignBits(), | 
 |                       CR.getSignedMax().getNumSignBits()); | 
 |     } | 
 |  | 
 |     break; | 
 |   } | 
 |   case ISD::ATOMIC_CMP_SWAP: | 
 |   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: | 
 |   case ISD::ATOMIC_SWAP: | 
 |   case ISD::ATOMIC_LOAD_ADD: | 
 |   case ISD::ATOMIC_LOAD_SUB: | 
 |   case ISD::ATOMIC_LOAD_AND: | 
 |   case ISD::ATOMIC_LOAD_CLR: | 
 |   case ISD::ATOMIC_LOAD_OR: | 
 |   case ISD::ATOMIC_LOAD_XOR: | 
 |   case ISD::ATOMIC_LOAD_NAND: | 
 |   case ISD::ATOMIC_LOAD_MIN: | 
 |   case ISD::ATOMIC_LOAD_MAX: | 
 |   case ISD::ATOMIC_LOAD_UMIN: | 
 |   case ISD::ATOMIC_LOAD_UMAX: | 
 |   case ISD::ATOMIC_LOAD: { | 
 |     Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits(); | 
 |     // If we are looking at the loaded value. | 
 |     if (Op.getResNo() == 0) { | 
 |       if (Tmp == VTBits) | 
 |         return 1; // early-out | 
 |       if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND) | 
 |         return VTBits - Tmp + 1; | 
 |       if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND) | 
 |         return VTBits - Tmp; | 
 |     } | 
 |     break; | 
 |   } | 
 |   } | 
 |  | 
 |   // If we are looking at the loaded value of the SDNode. | 
 |   if (Op.getResNo() == 0) { | 
    // Handle LOADX separately here. The EXTLOAD case will fall through.
 |     if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) { | 
 |       unsigned ExtType = LD->getExtensionType(); | 
 |       switch (ExtType) { | 
 |       default: break; | 
 |       case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known. | 
 |         Tmp = LD->getMemoryVT().getScalarSizeInBits(); | 
 |         return VTBits - Tmp + 1; | 
 |       case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known. | 
 |         Tmp = LD->getMemoryVT().getScalarSizeInBits(); | 
 |         return VTBits - Tmp; | 
 |       case ISD::NON_EXTLOAD: | 
 |         if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) { | 
 |           // We only need to handle vectors - computeKnownBits should handle | 
 |           // scalar cases. | 
 |           Type *CstTy = Cst->getType(); | 
 |           if (CstTy->isVectorTy() && !VT.isScalableVector() && | 
 |               (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() && | 
 |               VTBits == CstTy->getScalarSizeInBits()) { | 
 |             Tmp = VTBits; | 
 |             for (unsigned i = 0; i != NumElts; ++i) { | 
 |               if (!DemandedElts[i]) | 
 |                 continue; | 
 |               if (Constant *Elt = Cst->getAggregateElement(i)) { | 
 |                 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) { | 
 |                   const APInt &Value = CInt->getValue(); | 
 |                   Tmp = std::min(Tmp, Value.getNumSignBits()); | 
 |                   continue; | 
 |                 } | 
 |                 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) { | 
 |                   APInt Value = CFP->getValueAPF().bitcastToAPInt(); | 
 |                   Tmp = std::min(Tmp, Value.getNumSignBits()); | 
 |                   continue; | 
 |                 } | 
 |               } | 
 |               // Unknown type. Conservatively assume no bits match sign bit. | 
 |               return 1; | 
 |             } | 
 |             return Tmp; | 
 |           } | 
 |         } | 
 |         break; | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   // Allow the target to implement this method for its nodes. | 
 |   if (Opcode >= ISD::BUILTIN_OP_END || | 
 |       Opcode == ISD::INTRINSIC_WO_CHAIN || | 
 |       Opcode == ISD::INTRINSIC_W_CHAIN || | 
 |       Opcode == ISD::INTRINSIC_VOID) { | 
 |     // TODO: This can probably be removed once target code is audited.  This | 
 |     // is here purely to reduce patch size and review complexity. | 
 |     if (!VT.isScalableVector()) { | 
 |       unsigned NumBits = | 
 |         TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth); | 
 |       if (NumBits > 1) | 
 |         FirstAnswer = std::max(FirstAnswer, NumBits); | 
 |     } | 
 |   } | 
 |  | 
 |   // Finally, if we can prove that the top bits of the result are 0's or 1's, | 
 |   // use this information. | 
 |   KnownBits Known = computeKnownBits(Op, DemandedElts, Depth); | 
 |   return std::max(FirstAnswer, Known.countMinSignBits()); | 
 | } | 
 |  | 
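// Get the upper bound on bit size for Op as a signed integer, i.e.
// Op == sext(trunc(Op to MaxSignificantBits) to bitwidth). For example, an
// i32 value with 24 known sign bits has at most 32 - 24 + 1 == 9 significant
// bits.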
 | unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op, | 
 |                                                  unsigned Depth) const { | 
 |   unsigned SignBits = ComputeNumSignBits(Op, Depth); | 
 |   return Op.getScalarValueSizeInBits() - SignBits + 1; | 
 | } | 
 |  | 
 | unsigned SelectionDAG::ComputeMaxSignificantBits(SDValue Op, | 
 |                                                  const APInt &DemandedElts, | 
 |                                                  unsigned Depth) const { | 
 |   unsigned SignBits = ComputeNumSignBits(Op, DemandedElts, Depth); | 
 |   return Op.getScalarValueSizeInBits() - SignBits + 1; | 
 | } | 
 |  | 
 | bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly, | 
 |                                                     unsigned Depth) const { | 
 |   // Early out for FREEZE. | 
 |   if (Op.getOpcode() == ISD::FREEZE) | 
 |     return true; | 
 |  | 
 |   // TODO: Assume we don't know anything for now. | 
 |   EVT VT = Op.getValueType(); | 
 |   if (VT.isScalableVector()) | 
 |     return false; | 
 |  | 
 |   APInt DemandedElts = VT.isVector() | 
 |                            ? APInt::getAllOnes(VT.getVectorNumElements()) | 
 |                            : APInt(1, 1); | 
 |   return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth); | 
 | } | 
 |  | 
 | bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, | 
 |                                                     const APInt &DemandedElts, | 
 |                                                     bool PoisonOnly, | 
 |                                                     unsigned Depth) const { | 
 |   unsigned Opcode = Op.getOpcode(); | 
 |  | 
 |   // Early out for FREEZE. | 
 |   if (Opcode == ISD::FREEZE) | 
 |     return true; | 
 |  | 
 |   if (Depth >= MaxRecursionDepth) | 
 |     return false; // Limit search depth. | 
 |  | 
 |   if (isIntOrFPConstant(Op)) | 
 |     return true; | 
 |  | 
 |   switch (Opcode) { | 
 |   case ISD::VALUETYPE: | 
 |   case ISD::FrameIndex: | 
 |   case ISD::TargetFrameIndex: | 
 |     return true; | 
 |  | 
 |   case ISD::UNDEF: | 
 |     return PoisonOnly; | 
 |  | 
 |   case ISD::BUILD_VECTOR: | 
 |     // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements - | 
 |     // this shouldn't affect the result. | 
 |     for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) { | 
 |       if (!DemandedElts[i]) | 
 |         continue; | 
 |       if (!isGuaranteedNotToBeUndefOrPoison(Op.getOperand(i), PoisonOnly, | 
 |                                             Depth + 1)) | 
 |         return false; | 
 |     } | 
 |     return true; | 
 |  | 
 |     // TODO: Search for noundef attributes from library functions. | 
 |  | 
 |     // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef. | 
 |  | 
 |   default: | 
 |     // Allow the target to implement this method for its nodes. | 
 |     if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN || | 
 |         Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) | 
 |       return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode( | 
 |           Op, DemandedElts, *this, PoisonOnly, Depth); | 
 |     break; | 
 |   } | 
 |  | 
 |   // If Op can't create undef/poison and none of its operands are undef/poison | 
 |   // then Op is never undef/poison. | 
 |   // NOTE: TargetNodes should handle this in themselves in | 
 |   // isGuaranteedNotToBeUndefOrPoisonForTargetNode. | 
 |   return !canCreateUndefOrPoison(Op, PoisonOnly, /*ConsiderFlags*/ true, | 
 |                                  Depth) && | 
 |          all_of(Op->ops(), [&](SDValue V) { | 
 |            return isGuaranteedNotToBeUndefOrPoison(V, PoisonOnly, Depth + 1); | 
 |          }); | 
 | } | 
 |  | 
 | bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, bool PoisonOnly, | 
 |                                           bool ConsiderFlags, | 
 |                                           unsigned Depth) const { | 
 |   // TODO: Assume we don't know anything for now. | 
 |   EVT VT = Op.getValueType(); | 
 |   if (VT.isScalableVector()) | 
 |     return true; | 
 |  | 
 |   APInt DemandedElts = VT.isVector() | 
 |                            ? APInt::getAllOnes(VT.getVectorNumElements()) | 
 |                            : APInt(1, 1); | 
 |   return canCreateUndefOrPoison(Op, DemandedElts, PoisonOnly, ConsiderFlags, | 
 |                                 Depth); | 
 | } | 
 |  | 
 | bool SelectionDAG::canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts, | 
 |                                           bool PoisonOnly, bool ConsiderFlags, | 
 |                                           unsigned Depth) const { | 
 |   // TODO: Assume we don't know anything for now. | 
 |   EVT VT = Op.getValueType(); | 
 |   if (VT.isScalableVector()) | 
 |     return true; | 
 |  | 
 |   unsigned Opcode = Op.getOpcode(); | 
 |   switch (Opcode) { | 
 |   case ISD::FREEZE: | 
 |   case ISD::CONCAT_VECTORS: | 
 |   case ISD::INSERT_SUBVECTOR: | 
 |   case ISD::AND: | 
 |   case ISD::OR: | 
 |   case ISD::XOR: | 
 |   case ISD::ROTL: | 
 |   case ISD::ROTR: | 
 |   case ISD::FSHL: | 
 |   case ISD::FSHR: | 
 |   case ISD::BSWAP: | 
 |   case ISD::CTPOP: | 
 |   case ISD::BITREVERSE: | 
 |   case ISD::PARITY: | 
 |   case ISD::SIGN_EXTEND: | 
 |   case ISD::TRUNCATE: | 
 |   case ISD::SIGN_EXTEND_INREG: | 
 |   case ISD::SIGN_EXTEND_VECTOR_INREG: | 
 |   case ISD::ZERO_EXTEND_VECTOR_INREG: | 
 |   case ISD::BITCAST: | 
 |   case ISD::BUILD_VECTOR: | 
 |   case ISD::BUILD_PAIR: | 
 |     return false; | 
 |  | 
 |   // Matches hasPoisonGeneratingFlags(). | 
 |   case ISD::ZERO_EXTEND: | 
 |     return ConsiderFlags && Op->getFlags().hasNonNeg(); | 
 |  | 
 |   case ISD::ADD: | 
 |   case ISD::SUB: | 
 |   case ISD::MUL: | 
 |     // Matches hasPoisonGeneratingFlags(). | 
 |     return ConsiderFlags && (Op->getFlags().hasNoSignedWrap() || | 
 |                              Op->getFlags().hasNoUnsignedWrap()); | 
 |  | 
 |   case ISD::SHL: | 
 |     // If the max shift amount isn't in range, then the shift can create poison. | 
 |     if (!getValidMaximumShiftAmountConstant(Op, DemandedElts)) | 
 |       return true; | 
 |  | 
 |     // Matches hasPoisonGeneratingFlags(). | 
 |     return ConsiderFlags && (Op->getFlags().hasNoSignedWrap() || | 
 |                              Op->getFlags().hasNoUnsignedWrap()); | 
 |  | 
  case ISD::INSERT_VECTOR_ELT: {
 |     // Ensure that the element index is in bounds. | 
 |     EVT VecVT = Op.getOperand(0).getValueType(); | 
 |     KnownBits KnownIdx = computeKnownBits(Op.getOperand(2), Depth + 1); | 
 |     return KnownIdx.getMaxValue().uge(VecVT.getVectorMinNumElements()); | 
 |   } | 
 |  | 
 |   default: | 
 |     // Allow the target to implement this method for its nodes. | 
 |     if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN || | 
 |         Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID) | 
 |       return TLI->canCreateUndefOrPoisonForTargetNode( | 
 |           Op, DemandedElts, *this, PoisonOnly, ConsiderFlags, Depth); | 
 |     break; | 
 |   } | 
 |  | 
 |   // Be conservative and return true. | 
 |   return true; | 
 | } | 
 |  | 
 | bool SelectionDAG::isADDLike(SDValue Op) const { | 
 |   unsigned Opcode = Op.getOpcode(); | 
 |   if (Opcode == ISD::OR) | 
 |     return haveNoCommonBitsSet(Op.getOperand(0), Op.getOperand(1)); | 
 |   if (Opcode == ISD::XOR) | 
 |     return isMinSignedConstant(Op.getOperand(1)); | 
 |   return false; | 
 | } | 
 |  | 
 | bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const { | 
 |   if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) || | 
 |       !isa<ConstantSDNode>(Op.getOperand(1))) | 
 |     return false; | 
 |  | 
 |   if (Op.getOpcode() == ISD::OR && | 
 |       !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1))) | 
 |     return false; | 
 |  | 
 |   return true; | 
 | } | 
 |  | 
bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN,
                                   unsigned Depth) const {
 |   // If we're told that NaNs won't happen, assume they won't. | 
 |   if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs()) | 
 |     return true; | 
 |  | 
 |   if (Depth >= MaxRecursionDepth) | 
 |     return false; // Limit search depth. | 
 |  | 
 |   // If the value is a constant, we can obviously see if it is a NaN or not. | 
 |   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) { | 
 |     return !C->getValueAPF().isNaN() || | 
 |            (SNaN && !C->getValueAPF().isSignaling()); | 
 |   } | 
 |  | 
 |   unsigned Opcode = Op.getOpcode(); | 
 |   switch (Opcode) { | 
 |   case ISD::FADD: | 
 |   case ISD::FSUB: | 
 |   case ISD::FMUL: | 
 |   case ISD::FDIV: | 
 |   case ISD::FREM: | 
 |   case ISD::FSIN: | 
 |   case ISD::FCOS: | 
 |   case ISD::FMA: | 
 |   case ISD::FMAD: { | 
 |     if (SNaN) | 
 |       return true; | 
 |     // TODO: Need isKnownNeverInfinity | 
 |     return false; | 
 |   } | 
 |   case ISD::FCANONICALIZE: | 
 |   case ISD::FEXP: | 
 |   case ISD::FEXP2: | 
 |   case ISD::FEXP10: | 
 |   case ISD::FTRUNC: | 
 |   case ISD::FFLOOR: | 
 |   case ISD::FCEIL: | 
 |   case ISD::FROUND: | 
 |   case ISD::FROUNDEVEN: | 
 |   case ISD::FRINT: | 
 |   case ISD::LRINT: | 
 |   case ISD::LLRINT: | 
 |   case ISD::FNEARBYINT: | 
 |   case ISD::FLDEXP: { | 
 |     if (SNaN) | 
 |       return true; | 
 |     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); | 
 |   } | 
 |   case ISD::FABS: | 
 |   case ISD::FNEG: | 
 |   case ISD::FCOPYSIGN: { | 
 |     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); | 
 |   } | 
 |   case ISD::SELECT: | 
 |     return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) && | 
 |            isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1); | 
 |   case ISD::FP_EXTEND: | 
 |   case ISD::FP_ROUND: { | 
 |     if (SNaN) | 
 |       return true; | 
 |     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); | 
 |   } | 
 |   case ISD::SINT_TO_FP: | 
 |   case ISD::UINT_TO_FP: | 
 |     return true; | 
  case ISD::FSQRT: // Needs the operand to be known non-negative.
 |   case ISD::FLOG: | 
 |   case ISD::FLOG2: | 
 |   case ISD::FLOG10: | 
 |   case ISD::FPOWI: | 
 |   case ISD::FPOW: { | 
 |     if (SNaN) | 
 |       return true; | 
 |     // TODO: Refine on operand | 
 |     return false; | 
 |   } | 
 |   case ISD::FMINNUM: | 
 |   case ISD::FMAXNUM: { | 
 |     // Only one needs to be known not-nan, since it will be returned if the | 
 |     // other ends up being one. | 
 |     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) || | 
 |            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); | 
 |   } | 
 |   case ISD::FMINNUM_IEEE: | 
 |   case ISD::FMAXNUM_IEEE: { | 
 |     if (SNaN) | 
 |       return true; | 
 |     // This can return a NaN if either operand is an sNaN, or if both operands | 
 |     // are NaN. | 
 |     return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) && | 
 |             isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) || | 
 |            (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) && | 
 |             isKnownNeverSNaN(Op.getOperand(0), Depth + 1)); | 
 |   } | 
 |   case ISD::FMINIMUM: | 
 |   case ISD::FMAXIMUM: { | 
    // TODO: Does this quiet or return the original NaN as-is?
 |     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) && | 
 |            isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1); | 
 |   } | 
 |   case ISD::EXTRACT_VECTOR_ELT: { | 
 |     return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1); | 
 |   } | 
 |   case ISD::BUILD_VECTOR: { | 
 |     for (const SDValue &Opnd : Op->ops()) | 
 |       if (!isKnownNeverNaN(Opnd, SNaN, Depth + 1)) | 
 |         return false; | 
 |     return true; | 
 |   } | 
 |   default: | 
 |     if (Opcode >= ISD::BUILTIN_OP_END || | 
 |         Opcode == ISD::INTRINSIC_WO_CHAIN || | 
 |         Opcode == ISD::INTRINSIC_W_CHAIN || | 
 |         Opcode == ISD::INTRINSIC_VOID) { | 
 |       return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth); | 
 |     } | 
 |  | 
 |     return false; | 
 |   } | 
 | } | 
 |  | 
 | bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const { | 
 |   assert(Op.getValueType().isFloatingPoint() && | 
 |          "Floating point type expected"); | 
 |  | 
 |   // If the value is a constant, we can obviously see if it is a zero or not. | 
 |   if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) | 
 |     return !C->isZero(); | 
 |  | 
 |   // Return false if we find any zero in a vector. | 
 |   if (Op->getOpcode() == ISD::BUILD_VECTOR || | 
 |       Op->getOpcode() == ISD::SPLAT_VECTOR) { | 
 |     for (const SDValue &OpVal : Op->op_values()) { | 
 |       if (OpVal.isUndef()) | 
 |         return false; | 
 |       if (auto *C = dyn_cast<ConstantFPSDNode>(OpVal)) | 
 |         if (C->isZero()) | 
 |           return false; | 
 |     } | 
 |     return true; | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | bool SelectionDAG::isKnownNeverZero(SDValue Op, unsigned Depth) const { | 
 |   if (Depth >= MaxRecursionDepth) | 
 |     return false; // Limit search depth. | 
 |  | 
 |   assert(!Op.getValueType().isFloatingPoint() && | 
 |          "Floating point types unsupported - use isKnownNeverZeroFloat"); | 
 |  | 
 |   // If the value is a constant, we can obviously see if it is a zero or not. | 
 |   if (ISD::matchUnaryPredicate(Op, | 
 |                                [](ConstantSDNode *C) { return !C->isZero(); })) | 
 |     return true; | 
 |  | 
 |   // TODO: Recognize more cases here. Most of the cases are also incomplete to | 
 |   // some degree. | 
 |   switch (Op.getOpcode()) { | 
 |   default: | 
 |     break; | 
 |  | 
 |   case ISD::OR: | 
 |     return isKnownNeverZero(Op.getOperand(1), Depth + 1) || | 
 |            isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |  | 
 |   case ISD::VSELECT: | 
 |   case ISD::SELECT: | 
 |     return isKnownNeverZero(Op.getOperand(1), Depth + 1) && | 
 |            isKnownNeverZero(Op.getOperand(2), Depth + 1); | 
 |  | 
 |   case ISD::SHL: { | 
 |     if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap()) | 
 |       return isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |     KnownBits ValKnown = computeKnownBits(Op.getOperand(0), Depth + 1); | 
 |     // 1 << X is never zero. | 
 |     if (ValKnown.One[0]) | 
 |       return true; | 
    // If at least one known-one bit survives shifting by the maximum possible
    // amount, the result is non-zero for every smaller amount too.
 |     APInt MaxCnt = computeKnownBits(Op.getOperand(1), Depth + 1).getMaxValue(); | 
 |     if (MaxCnt.ult(ValKnown.getBitWidth()) && | 
 |         !ValKnown.One.shl(MaxCnt).isZero()) | 
 |       return true; | 
 |     break; | 
 |   } | 
 |   case ISD::UADDSAT: | 
 |   case ISD::UMAX: | 
 |     return isKnownNeverZero(Op.getOperand(1), Depth + 1) || | 
 |            isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |  | 
    // TODO for smin/smax: If either operand is known negative/positive
    // respectively, we don't need to know anything about the other operand.
 |   case ISD::SMAX: | 
 |   case ISD::SMIN: | 
 |   case ISD::UMIN: | 
 |     return isKnownNeverZero(Op.getOperand(1), Depth + 1) && | 
 |            isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |  | 
 |   case ISD::ROTL: | 
 |   case ISD::ROTR: | 
 |   case ISD::BITREVERSE: | 
 |   case ISD::BSWAP: | 
 |   case ISD::CTPOP: | 
 |   case ISD::ABS: | 
 |     return isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |  | 
 |   case ISD::SRA: | 
 |   case ISD::SRL: { | 
 |     if (Op->getFlags().hasExact()) | 
 |       return isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |     KnownBits ValKnown = computeKnownBits(Op.getOperand(0), Depth + 1); | 
 |     if (ValKnown.isNegative()) | 
 |       return true; | 
    // If at least one known-one bit survives shifting by the maximum possible
    // amount, the result is non-zero for every smaller amount too.
 |     APInt MaxCnt = computeKnownBits(Op.getOperand(1), Depth + 1).getMaxValue(); | 
 |     if (MaxCnt.ult(ValKnown.getBitWidth()) && | 
 |         !ValKnown.One.lshr(MaxCnt).isZero()) | 
 |       return true; | 
 |     break; | 
 |   } | 
 |   case ISD::UDIV: | 
 |   case ISD::SDIV: | 
 |     // div exact can only produce a zero if the dividend is zero. | 
 |     // TODO: For udiv this is also true if Op1 u<= Op0 | 
 |     if (Op->getFlags().hasExact()) | 
 |       return isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |     break; | 
 |  | 
 |   case ISD::ADD: | 
 |     if (Op->getFlags().hasNoUnsignedWrap()) | 
 |       if (isKnownNeverZero(Op.getOperand(1), Depth + 1) || | 
 |           isKnownNeverZero(Op.getOperand(0), Depth + 1)) | 
 |         return true; | 
 |     // TODO: There are a lot more cases we can prove for add. | 
 |     break; | 
 |  | 
 |   case ISD::SUB: { | 
 |     if (isNullConstant(Op.getOperand(0))) | 
 |       return isKnownNeverZero(Op.getOperand(1), Depth + 1); | 
 |  | 
 |     std::optional<bool> ne = | 
 |         KnownBits::ne(computeKnownBits(Op.getOperand(0), Depth + 1), | 
 |                       computeKnownBits(Op.getOperand(1), Depth + 1)); | 
 |     return ne && *ne; | 
 |   } | 
 |  | 
 |   case ISD::MUL: | 
 |     if (Op->getFlags().hasNoSignedWrap() || Op->getFlags().hasNoUnsignedWrap()) | 
 |       if (isKnownNeverZero(Op.getOperand(1), Depth + 1) && | 
 |           isKnownNeverZero(Op.getOperand(0), Depth + 1)) | 
 |         return true; | 
 |     break; | 
 |  | 
 |   case ISD::ZERO_EXTEND: | 
 |   case ISD::SIGN_EXTEND: | 
 |     return isKnownNeverZero(Op.getOperand(0), Depth + 1); | 
 |   } | 
 |  | 
 |   return computeKnownBits(Op, Depth).isNonZero(); | 
 | } | 
 |  | 
 | bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const { | 
 |   // Check the obvious case. | 
 |   if (A == B) return true; | 
 |  | 
 |   // For negative and positive zero. | 
 |   if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) | 
 |     if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) | 
 |       if (CA->isZero() && CB->isZero()) return true; | 
 |  | 
 |   // Otherwise they may not be equal. | 
 |   return false; | 
 | } | 
 |  | 
// Only bits set in Mask must be negated; other bits may be arbitrary.
 | SDValue llvm::getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs) { | 
 |   if (isBitwiseNot(V, AllowUndefs)) | 
 |     return V.getOperand(0); | 
 |  | 
 |   // Handle any_extend (not (truncate X)) pattern, where Mask only sets | 
 |   // bits in the non-extended part. | 
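  // In that case V equals ~X on every bit set in Mask, so the pre-truncate
  // operand X can stand in for the inverted value.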
 |   ConstantSDNode *MaskC = isConstOrConstSplat(Mask); | 
 |   if (!MaskC || V.getOpcode() != ISD::ANY_EXTEND) | 
 |     return SDValue(); | 
 |   SDValue ExtArg = V.getOperand(0); | 
 |   if (ExtArg.getScalarValueSizeInBits() >= | 
 |           MaskC->getAPIntValue().getActiveBits() && | 
 |       isBitwiseNot(ExtArg, AllowUndefs) && | 
 |       ExtArg.getOperand(0).getOpcode() == ISD::TRUNCATE && | 
 |       ExtArg.getOperand(0).getOperand(0).getValueType() == V.getValueType()) | 
 |     return ExtArg.getOperand(0).getOperand(0); | 
 |   return SDValue(); | 
 | } | 
 |  | 
 | static bool haveNoCommonBitsSetCommutative(SDValue A, SDValue B) { | 
 |   // Match masked merge pattern (X & ~M) op (Y & M) | 
 |   // Including degenerate case (X & ~M) op M | 
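  // e.g. in (X & ~M) | (Y & M) the two sides select disjoint bits of M, so
  // no bit position can be set in both operands.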
 |   auto MatchNoCommonBitsPattern = [&](SDValue Not, SDValue Mask, | 
 |                                       SDValue Other) { | 
 |     if (SDValue NotOperand = | 
 |             getBitwiseNotOperand(Not, Mask, /* AllowUndefs */ true)) { | 
 |       if (NotOperand->getOpcode() == ISD::ZERO_EXTEND || | 
 |           NotOperand->getOpcode() == ISD::TRUNCATE) | 
 |         NotOperand = NotOperand->getOperand(0); | 
 |  | 
 |       if (Other == NotOperand) | 
 |         return true; | 
 |       if (Other->getOpcode() == ISD::AND) | 
 |         return NotOperand == Other->getOperand(0) || | 
 |                NotOperand == Other->getOperand(1); | 
 |     } | 
 |     return false; | 
 |   }; | 
 |  | 
 |   if (A->getOpcode() == ISD::ZERO_EXTEND || A->getOpcode() == ISD::TRUNCATE) | 
 |     A = A->getOperand(0); | 
 |  | 
 |   if (B->getOpcode() == ISD::ZERO_EXTEND || B->getOpcode() == ISD::TRUNCATE) | 
 |     B = B->getOperand(0); | 
 |  | 
 |   if (A->getOpcode() == ISD::AND) | 
 |     return MatchNoCommonBitsPattern(A->getOperand(0), A->getOperand(1), B) || | 
 |            MatchNoCommonBitsPattern(A->getOperand(1), A->getOperand(0), B); | 
 |   return false; | 
 | } | 
 |  | 
 | // FIXME: unify with llvm::haveNoCommonBitsSet. | 
 | bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const { | 
 |   assert(A.getValueType() == B.getValueType() && | 
 |          "Values must have the same type"); | 
 |   if (haveNoCommonBitsSetCommutative(A, B) || | 
 |       haveNoCommonBitsSetCommutative(B, A)) | 
 |     return true; | 
 |   return KnownBits::haveNoCommonBitsSet(computeKnownBits(A), | 
 |                                         computeKnownBits(B)); | 
 | } | 
 |  | 
 | static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step, | 
 |                                SelectionDAG &DAG) { | 
 |   if (cast<ConstantSDNode>(Step)->isZero()) | 
 |     return DAG.getConstant(0, DL, VT); | 
 |  | 
 |   return SDValue(); | 
 | } | 
 |  | 
 | static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT, | 
 |                                 ArrayRef<SDValue> Ops, | 
 |                                 SelectionDAG &DAG) { | 
 |   int NumOps = Ops.size(); | 
 |   assert(NumOps != 0 && "Can't build an empty vector!"); | 
 |   assert(!VT.isScalableVector() && | 
 |          "BUILD_VECTOR cannot be used with scalable types"); | 
 |   assert(VT.getVectorNumElements() == (unsigned)NumOps && | 
 |          "Incorrect element count in BUILD_VECTOR!"); | 
 |  | 
 |   // BUILD_VECTOR of UNDEFs is UNDEF. | 
 |   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) | 
 |     return DAG.getUNDEF(VT); | 
 |  | 
  // A BUILD_VECTOR of sequential extracts from a single source vector of the
  // same type is an identity of that source vector.
 |   SDValue IdentitySrc; | 
 |   bool IsIdentity = true; | 
 |   for (int i = 0; i != NumOps; ++i) { | 
 |     if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT || | 
 |         Ops[i].getOperand(0).getValueType() != VT || | 
 |         (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) || | 
 |         !isa<ConstantSDNode>(Ops[i].getOperand(1)) || | 
 |         Ops[i].getConstantOperandAPInt(1) != i) { | 
 |       IsIdentity = false; | 
 |       break; | 
 |     } | 
 |     IdentitySrc = Ops[i].getOperand(0); | 
 |   } | 
 |   if (IsIdentity) | 
 |     return IdentitySrc; | 
 |  | 
 |   return SDValue(); | 
 | } | 
 |  | 
 | /// Try to simplify vector concatenation to an input value, undef, or build | 
 | /// vector. | 
 | static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT, | 
 |                                   ArrayRef<SDValue> Ops, | 
 |                                   SelectionDAG &DAG) { | 
 |   assert(!Ops.empty() && "Can't concatenate an empty list of vectors!"); | 
 |   assert(llvm::all_of(Ops, | 
 |                       [Ops](SDValue Op) { | 
 |                         return Ops[0].getValueType() == Op.getValueType(); | 
 |                       }) && | 
 |          "Concatenation of vectors with inconsistent value types!"); | 
 |   assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) == | 
 |              VT.getVectorElementCount() && | 
 |          "Incorrect element count in vector concatenation!"); | 
 |  | 
 |   if (Ops.size() == 1) | 
 |     return Ops[0]; | 
 |  | 
 |   // Concat of UNDEFs is UNDEF. | 
 |   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); })) | 
 |     return DAG.getUNDEF(VT); | 
 |  | 
 |   // Scan the operands and look for extract operations from a single source | 
 |   // that correspond to insertion at the same location via this concatenation: | 
 |   // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ... | 
 |   SDValue IdentitySrc; | 
 |   bool IsIdentity = true; | 
 |   for (unsigned i = 0, e = Ops.size(); i != e; ++i) { | 
 |     SDValue Op = Ops[i]; | 
 |     unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements(); | 
 |     if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR || | 
 |         Op.getOperand(0).getValueType() != VT || | 
 |         (IdentitySrc && Op.getOperand(0) != IdentitySrc) || | 
 |         Op.getConstantOperandVal(1) != IdentityIndex) { | 
 |       IsIdentity = false; | 
 |       break; | 
 |     } | 
 |     assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) && | 
 |            "Unexpected identity source vector for concat of extracts"); | 
 |     IdentitySrc = Op.getOperand(0); | 
 |   } | 
 |   if (IsIdentity) { | 
 |     assert(IdentitySrc && "Failed to set source vector of extracts"); | 
 |     return IdentitySrc; | 
 |   } | 
 |  | 
 |   // The code below this point is only designed to work for fixed width | 
 |   // vectors, so we bail out for now. | 
 |   if (VT.isScalableVector()) | 
 |     return SDValue(); | 
 |  | 
  // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
 |   // simplified to one big BUILD_VECTOR. | 
 |   // FIXME: Add support for SCALAR_TO_VECTOR as well. | 
 |   EVT SVT = VT.getScalarType(); | 
 |   SmallVector<SDValue, 16> Elts; | 
 |   for (SDValue Op : Ops) { | 
 |     EVT OpVT = Op.getValueType(); | 
 |     if (Op.isUndef()) | 
 |       Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT)); | 
 |     else if (Op.getOpcode() == ISD::BUILD_VECTOR) | 
 |       Elts.append(Op->op_begin(), Op->op_end()); | 
 |     else | 
 |       return SDValue(); | 
 |   } | 
 |  | 
  // BUILD_VECTOR requires all inputs to be of the same type; find the
  // maximum type and extend them all.
 |   for (SDValue Op : Elts) | 
 |     SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); | 
 |  | 
 |   if (SVT.bitsGT(VT.getScalarType())) { | 
 |     for (SDValue &Op : Elts) { | 
 |       if (Op.isUndef()) | 
 |         Op = DAG.getUNDEF(SVT); | 
 |       else | 
 |         Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT) | 
 |                  ? DAG.getZExtOrTrunc(Op, DL, SVT) | 
 |                  : DAG.getSExtOrTrunc(Op, DL, SVT); | 
 |     } | 
 |   } | 
 |  | 
 |   SDValue V = DAG.getBuildVector(VT, DL, Elts); | 
 |   NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG); | 
 |   return V; | 
 | } | 
 |  | 
 | /// Gets or creates the specified node. | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opcode, getVTList(VT), std::nullopt); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), | 
 |                               getVTList(VT)); | 
 |   CSEMap.InsertNode(N, IP); | 
 |  | 
 |   InsertNode(N); | 
 |   SDValue V = SDValue(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1) { | 
 |   SDNodeFlags Flags; | 
 |   if (Inserter) | 
 |     Flags = Inserter->getFlags(); | 
 |   return getNode(Opcode, DL, VT, N1, Flags); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1, const SDNodeFlags Flags) { | 
 |   assert(N1.getOpcode() != ISD::DELETED_NODE && "Operand is DELETED_NODE!"); | 
 |  | 
 |   // Constant fold unary operations with a vector integer or float operand. | 
 |   switch (Opcode) { | 
 |   default: | 
 |     // FIXME: Entirely reasonable to perform folding of other unary | 
 |     // operations here as the need arises. | 
 |     break; | 
 |   case ISD::FNEG: | 
 |   case ISD::FABS: | 
 |   case ISD::FCEIL: | 
 |   case ISD::FTRUNC: | 
 |   case ISD::FFLOOR: | 
 |   case ISD::FP_EXTEND: | 
 |   case ISD::FP_TO_SINT: | 
 |   case ISD::FP_TO_UINT: | 
 |   case ISD::FP_TO_FP16: | 
 |   case ISD::FP_TO_BF16: | 
 |   case ISD::TRUNCATE: | 
 |   case ISD::ANY_EXTEND: | 
 |   case ISD::ZERO_EXTEND: | 
 |   case ISD::SIGN_EXTEND: | 
 |   case ISD::UINT_TO_FP: | 
 |   case ISD::SINT_TO_FP: | 
 |   case ISD::FP16_TO_FP: | 
 |   case ISD::BF16_TO_FP: | 
 |   case ISD::BITCAST: | 
 |   case ISD::ABS: | 
 |   case ISD::BITREVERSE: | 
 |   case ISD::BSWAP: | 
 |   case ISD::CTLZ: | 
 |   case ISD::CTLZ_ZERO_UNDEF: | 
 |   case ISD::CTTZ: | 
 |   case ISD::CTTZ_ZERO_UNDEF: | 
 |   case ISD::CTPOP: | 
 |   case ISD::STEP_VECTOR: { | 
    SDValue Ops[] = {N1};
 |     if (SDValue Fold = FoldConstantArithmetic(Opcode, DL, VT, Ops)) | 
 |       return Fold; | 
 |   } | 
 |   } | 
 |  | 
 |   unsigned OpOpcode = N1.getNode()->getOpcode(); | 
 |   switch (Opcode) { | 
 |   case ISD::STEP_VECTOR: | 
 |     assert(VT.isScalableVector() && | 
 |            "STEP_VECTOR can only be used with scalable types"); | 
 |     assert(OpOpcode == ISD::TargetConstant && | 
 |            VT.getVectorElementType() == N1.getValueType() && | 
 |            "Unexpected step operand"); | 
 |     break; | 
 |   case ISD::FREEZE: | 
 |     assert(VT == N1.getValueType() && "Unexpected VT!"); | 
 |     if (isGuaranteedNotToBeUndefOrPoison(N1, /*PoisonOnly*/ false, | 
 |                                          /*Depth*/ 1)) | 
 |       return N1; | 
 |     break; | 
 |   case ISD::TokenFactor: | 
 |   case ISD::MERGE_VALUES: | 
 |   case ISD::CONCAT_VECTORS: | 
 |     return N1;         // Factor, merge or concat of one node?  No need. | 
 |   case ISD::BUILD_VECTOR: { | 
 |     // Attempt to simplify BUILD_VECTOR. | 
 |     SDValue Ops[] = {N1}; | 
 |     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) | 
 |       return V; | 
 |     break; | 
 |   } | 
 |   case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node"); | 
 |   case ISD::FP_EXTEND: | 
 |     assert(VT.isFloatingPoint() && N1.getValueType().isFloatingPoint() && | 
 |            "Invalid FP cast!"); | 
 |     if (N1.getValueType() == VT) return N1;  // noop conversion. | 
 |     assert((!VT.isVector() || VT.getVectorElementCount() == | 
 |                                   N1.getValueType().getVectorElementCount()) && | 
 |            "Vector element count mismatch!"); | 
 |     assert(N1.getValueType().bitsLT(VT) && "Invalid fpext node, dst < src!"); | 
 |     if (N1.isUndef()) | 
 |       return getUNDEF(VT); | 
 |     break; | 
 |   case ISD::FP_TO_SINT: | 
 |   case ISD::FP_TO_UINT: | 
 |     if (N1.isUndef()) | 
 |       return getUNDEF(VT); | 
 |     break; | 
 |   case ISD::SINT_TO_FP: | 
 |   case ISD::UINT_TO_FP: | 
 |     // [us]itofp(undef) = 0, because the result value is bounded. | 
 |     if (N1.isUndef()) | 
 |       return getConstantFP(0.0, DL, VT); | 
 |     break; | 
 |   case ISD::SIGN_EXTEND: | 
 |     assert(VT.isInteger() && N1.getValueType().isInteger() && | 
 |            "Invalid SIGN_EXTEND!"); | 
 |     assert(VT.isVector() == N1.getValueType().isVector() && | 
 |            "SIGN_EXTEND result type type should be vector iff the operand " | 
 |            "type is vector!"); | 
 |     if (N1.getValueType() == VT) return N1;   // noop extension | 
 |     assert((!VT.isVector() || VT.getVectorElementCount() == | 
 |                                   N1.getValueType().getVectorElementCount()) && | 
 |            "Vector element count mismatch!"); | 
 |     assert(N1.getValueType().bitsLT(VT) && "Invalid sext node, dst < src!"); | 
 |     if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND) | 
 |       return getNode(OpOpcode, DL, VT, N1.getOperand(0)); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       // sext(undef) = 0, because the top bits will all be the same. | 
 |       return getConstant(0, DL, VT); | 
 |     break; | 
 |   case ISD::ZERO_EXTEND: | 
 |     assert(VT.isInteger() && N1.getValueType().isInteger() && | 
 |            "Invalid ZERO_EXTEND!"); | 
 |     assert(VT.isVector() == N1.getValueType().isVector() && | 
 |            "ZERO_EXTEND result type type should be vector iff the operand " | 
 |            "type is vector!"); | 
 |     if (N1.getValueType() == VT) return N1;   // noop extension | 
 |     assert((!VT.isVector() || VT.getVectorElementCount() == | 
 |                                   N1.getValueType().getVectorElementCount()) && | 
 |            "Vector element count mismatch!"); | 
 |     assert(N1.getValueType().bitsLT(VT) && "Invalid zext node, dst < src!"); | 
 |     if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x) | 
 |       return getNode(ISD::ZERO_EXTEND, DL, VT, N1.getOperand(0)); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       // zext(undef) = 0, because the top bits will be zero. | 
 |       return getConstant(0, DL, VT); | 
 |  | 
 |     // Skip unnecessary zext_inreg pattern: | 
 |     // (zext (trunc x)) -> x iff the upper bits are known zero. | 
 |     // TODO: Remove (zext (trunc (and x, c))) exception which some targets | 
 |     // use to recognise zext_inreg patterns. | 
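    // e.g. (zext (trunc X:i32 to i16) to i32) -> X when the top 16 bits of X
    // are known zero.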
 |     if (OpOpcode == ISD::TRUNCATE) { | 
 |       SDValue OpOp = N1.getOperand(0); | 
 |       if (OpOp.getValueType() == VT) { | 
 |         if (OpOp.getOpcode() != ISD::AND) { | 
 |           APInt HiBits = APInt::getBitsSetFrom(VT.getScalarSizeInBits(), | 
 |                                                N1.getScalarValueSizeInBits()); | 
 |           if (MaskedValueIsZero(OpOp, HiBits)) { | 
 |             transferDbgValues(N1, OpOp); | 
 |             return OpOp; | 
 |           } | 
 |         } | 
 |       } | 
 |     } | 
 |     break; | 
 |   case ISD::ANY_EXTEND: | 
 |     assert(VT.isInteger() && N1.getValueType().isInteger() && | 
 |            "Invalid ANY_EXTEND!"); | 
 |     assert(VT.isVector() == N1.getValueType().isVector() && | 
 |            "ANY_EXTEND result type type should be vector iff the operand " | 
 |            "type is vector!"); | 
 |     if (N1.getValueType() == VT) return N1;   // noop extension | 
 |     assert((!VT.isVector() || VT.getVectorElementCount() == | 
 |                                   N1.getValueType().getVectorElementCount()) && | 
 |            "Vector element count mismatch!"); | 
 |     assert(N1.getValueType().bitsLT(VT) && "Invalid anyext node, dst < src!"); | 
 |  | 
 |     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || | 
 |         OpOpcode == ISD::ANY_EXTEND) | 
 |       // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x) | 
 |       return getNode(OpOpcode, DL, VT, N1.getOperand(0)); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     // (ext (trunc x)) -> x | 
 |     if (OpOpcode == ISD::TRUNCATE) { | 
 |       SDValue OpOp = N1.getOperand(0); | 
 |       if (OpOp.getValueType() == VT) { | 
 |         transferDbgValues(N1, OpOp); | 
 |         return OpOp; | 
 |       } | 
 |     } | 
 |     break; | 
 |   case ISD::TRUNCATE: | 
 |     assert(VT.isInteger() && N1.getValueType().isInteger() && | 
 |            "Invalid TRUNCATE!"); | 
 |     assert(VT.isVector() == N1.getValueType().isVector() && | 
 |            "TRUNCATE result type type should be vector iff the operand " | 
 |            "type is vector!"); | 
 |     if (N1.getValueType() == VT) return N1;   // noop truncate | 
 |     assert((!VT.isVector() || VT.getVectorElementCount() == | 
 |                                   N1.getValueType().getVectorElementCount()) && | 
 |            "Vector element count mismatch!"); | 
 |     assert(N1.getValueType().bitsGT(VT) && "Invalid truncate node, src < dst!"); | 
 |     if (OpOpcode == ISD::TRUNCATE) | 
 |       return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0)); | 
 |     if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND || | 
 |         OpOpcode == ISD::ANY_EXTEND) { | 
 |       // If the source is smaller than the dest, we still need an extend. | 
 |       if (N1.getOperand(0).getValueType().getScalarType().bitsLT( | 
 |               VT.getScalarType())) | 
 |         return getNode(OpOpcode, DL, VT, N1.getOperand(0)); | 
 |       if (N1.getOperand(0).getValueType().bitsGT(VT)) | 
 |         return getNode(ISD::TRUNCATE, DL, VT, N1.getOperand(0)); | 
 |       return N1.getOperand(0); | 
 |     } | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getUNDEF(VT); | 
 |     if (OpOpcode == ISD::VSCALE && !NewNodesMustHaveLegalTypes) | 
 |       return getVScale(DL, VT, | 
 |                        N1.getConstantOperandAPInt(0).trunc(VT.getSizeInBits())); | 
 |     break; | 
 |   case ISD::ANY_EXTEND_VECTOR_INREG: | 
 |   case ISD::ZERO_EXTEND_VECTOR_INREG: | 
 |   case ISD::SIGN_EXTEND_VECTOR_INREG: | 
 |     assert(VT.isVector() && "This DAG node is restricted to vector types."); | 
 |     assert(N1.getValueType().bitsLE(VT) && | 
 |            "The input must be the same size or smaller than the result."); | 
 |     assert(VT.getVectorMinNumElements() < | 
 |                N1.getValueType().getVectorMinNumElements() && | 
 |            "The destination vector type must have fewer lanes than the input."); | 
 |     break; | 
 |   case ISD::ABS: | 
 |     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid ABS!"); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getConstant(0, DL, VT); | 
 |     break; | 
 |   case ISD::BSWAP: | 
 |     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!"); | 
 |     assert((VT.getScalarSizeInBits() % 16 == 0) && | 
 |            "BSWAP types must be a multiple of 16 bits!"); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getUNDEF(VT); | 
 |     // bswap(bswap(X)) -> X. | 
 |     if (OpOpcode == ISD::BSWAP) | 
 |       return N1.getOperand(0); | 
 |     break; | 
 |   case ISD::BITREVERSE: | 
 |     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BITREVERSE!"); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getUNDEF(VT); | 
 |     break; | 
 |   case ISD::BITCAST: | 
 |     assert(VT.getSizeInBits() == N1.getValueSizeInBits() && | 
 |            "Cannot BITCAST between types of different sizes!"); | 
 |     if (VT == N1.getValueType()) return N1;   // noop conversion. | 
 |     if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x) | 
 |       return getNode(ISD::BITCAST, DL, VT, N1.getOperand(0)); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getUNDEF(VT); | 
 |     break; | 
 |   case ISD::SCALAR_TO_VECTOR: | 
 |     assert(VT.isVector() && !N1.getValueType().isVector() && | 
 |            (VT.getVectorElementType() == N1.getValueType() || | 
 |             (VT.getVectorElementType().isInteger() && | 
 |              N1.getValueType().isInteger() && | 
 |              VT.getVectorElementType().bitsLE(N1.getValueType()))) && | 
 |            "Illegal SCALAR_TO_VECTOR node!"); | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getUNDEF(VT); | 
 |     // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined. | 
 |     if (OpOpcode == ISD::EXTRACT_VECTOR_ELT && | 
 |         isa<ConstantSDNode>(N1.getOperand(1)) && | 
 |         N1.getConstantOperandVal(1) == 0 && | 
 |         N1.getOperand(0).getValueType() == VT) | 
 |       return N1.getOperand(0); | 
 |     break; | 
 |   case ISD::FNEG: | 
 |     // Negation of an unknown bag of bits is still completely undefined. | 
 |     if (OpOpcode == ISD::UNDEF) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     if (OpOpcode == ISD::FNEG) // --X -> X | 
 |       return N1.getOperand(0); | 
 |     break; | 
 |   case ISD::FABS: | 
 |     if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X) | 
 |       return getNode(ISD::FABS, DL, VT, N1.getOperand(0)); | 
 |     break; | 
 |   case ISD::VSCALE: | 
 |     assert(VT == N1.getValueType() && "Unexpected VT!"); | 
 |     break; | 
 |   case ISD::CTPOP: | 
 |     if (N1.getValueType().getScalarType() == MVT::i1) | 
 |       return N1; | 
 |     break; | 
 |   case ISD::CTLZ: | 
 |   case ISD::CTTZ: | 
 |     if (N1.getValueType().getScalarType() == MVT::i1) | 
 |       return getNOT(DL, N1, N1.getValueType()); | 
 |     break; | 
 |   case ISD::VECREDUCE_ADD: | 
 |     if (N1.getValueType().getScalarType() == MVT::i1) | 
 |       return getNode(ISD::VECREDUCE_XOR, DL, VT, N1); | 
 |     break; | 
 |   case ISD::VECREDUCE_SMIN: | 
 |   case ISD::VECREDUCE_UMAX: | 
 |     if (N1.getValueType().getScalarType() == MVT::i1) | 
 |       return getNode(ISD::VECREDUCE_OR, DL, VT, N1); | 
 |     break; | 
 |   case ISD::VECREDUCE_SMAX: | 
 |   case ISD::VECREDUCE_UMIN: | 
 |     if (N1.getValueType().getScalarType() == MVT::i1) | 
 |       return getNode(ISD::VECREDUCE_AND, DL, VT, N1); | 
 |     break; | 
 |   } | 
 |  | 
 |   SDNode *N; | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = {N1}; | 
 |   if (VT != MVT::Glue) { // Don't CSE glue producing nodes | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTs, Ops); | 
 |     void *IP = nullptr; | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { | 
 |       E->intersectFlagsWith(Flags); | 
 |       return SDValue(E, 0); | 
 |     } | 
 |  | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     N->setFlags(Flags); | 
 |     createOperands(N, Ops); | 
 |     CSEMap.InsertNode(N, IP); | 
 |   } else { | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     createOperands(N, Ops); | 
 |   } | 
 |  | 
 |   InsertNode(N); | 
 |   SDValue V = SDValue(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
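// Attempt to constant fold a binary integer operation on two APInt operands,
// returning std::nullopt for opcodes that are not handled here or for cases
// that must not be folded (e.g. division or remainder by zero).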
 | static std::optional<APInt> FoldValue(unsigned Opcode, const APInt &C1, | 
 |                                       const APInt &C2) { | 
 |   switch (Opcode) { | 
 |   case ISD::ADD:  return C1 + C2; | 
 |   case ISD::SUB:  return C1 - C2; | 
 |   case ISD::MUL:  return C1 * C2; | 
 |   case ISD::AND:  return C1 & C2; | 
 |   case ISD::OR:   return C1 | C2; | 
 |   case ISD::XOR:  return C1 ^ C2; | 
 |   case ISD::SHL:  return C1 << C2; | 
 |   case ISD::SRL:  return C1.lshr(C2); | 
 |   case ISD::SRA:  return C1.ashr(C2); | 
 |   case ISD::ROTL: return C1.rotl(C2); | 
 |   case ISD::ROTR: return C1.rotr(C2); | 
 |   case ISD::SMIN: return C1.sle(C2) ? C1 : C2; | 
 |   case ISD::SMAX: return C1.sge(C2) ? C1 : C2; | 
 |   case ISD::UMIN: return C1.ule(C2) ? C1 : C2; | 
 |   case ISD::UMAX: return C1.uge(C2) ? C1 : C2; | 
 |   case ISD::SADDSAT: return C1.sadd_sat(C2); | 
 |   case ISD::UADDSAT: return C1.uadd_sat(C2); | 
 |   case ISD::SSUBSAT: return C1.ssub_sat(C2); | 
 |   case ISD::USUBSAT: return C1.usub_sat(C2); | 
 |   case ISD::SSHLSAT: return C1.sshl_sat(C2); | 
 |   case ISD::USHLSAT: return C1.ushl_sat(C2); | 
 |   case ISD::UDIV: | 
 |     if (!C2.getBoolValue()) | 
 |       break; | 
 |     return C1.udiv(C2); | 
 |   case ISD::UREM: | 
 |     if (!C2.getBoolValue()) | 
 |       break; | 
 |     return C1.urem(C2); | 
 |   case ISD::SDIV: | 
 |     if (!C2.getBoolValue()) | 
 |       break; | 
 |     return C1.sdiv(C2); | 
 |   case ISD::SREM: | 
 |     if (!C2.getBoolValue()) | 
 |       break; | 
 |     return C1.srem(C2); | 
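  // MULHS/MULHU produce the high half of the full product, so fold by
  // widening to twice the bit width and extracting the upper bits; e.g. for
  // i8, mulhs(100, 100) extracts bits [15:8] of 10000 (0x2710), giving 0x27.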
 |   case ISD::MULHS: { | 
 |     unsigned FullWidth = C1.getBitWidth() * 2; | 
 |     APInt C1Ext = C1.sext(FullWidth); | 
 |     APInt C2Ext = C2.sext(FullWidth); | 
 |     return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth()); | 
 |   } | 
 |   case ISD::MULHU: { | 
 |     unsigned FullWidth = C1.getBitWidth() * 2; | 
 |     APInt C1Ext = C1.zext(FullWidth); | 
 |     APInt C2Ext = C2.zext(FullWidth); | 
 |     return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth()); | 
 |   } | 
 |   case ISD::AVGFLOORS: { | 
 |     unsigned FullWidth = C1.getBitWidth() + 1; | 
 |     APInt C1Ext = C1.sext(FullWidth); | 
 |     APInt C2Ext = C2.sext(FullWidth); | 
 |     return (C1Ext + C2Ext).extractBits(C1.getBitWidth(), 1); | 
 |   } | 
 |   case ISD::AVGFLOORU: { | 
 |     unsigned FullWidth = C1.getBitWidth() + 1; | 
 |     APInt C1Ext = C1.zext(FullWidth); | 
 |     APInt C2Ext = C2.zext(FullWidth); | 
 |     return (C1Ext + C2Ext).extractBits(C1.getBitWidth(), 1); | 
 |   } | 
 |   case ISD::AVGCEILS: { | 
 |     unsigned FullWidth = C1.getBitWidth() + 1; | 
 |     APInt C1Ext = C1.sext(FullWidth); | 
 |     APInt C2Ext = C2.sext(FullWidth); | 
 |     return (C1Ext + C2Ext + 1).extractBits(C1.getBitWidth(), 1); | 
 |   } | 
 |   case ISD::AVGCEILU: { | 
 |     unsigned FullWidth = C1.getBitWidth() + 1; | 
 |     APInt C1Ext = C1.zext(FullWidth); | 
 |     APInt C2Ext = C2.zext(FullWidth); | 
 |     return (C1Ext + C2Ext + 1).extractBits(C1.getBitWidth(), 1); | 
 |   } | 
 |   case ISD::ABDS: | 
 |     return APIntOps::smax(C1, C2) - APIntOps::smin(C1, C2); | 
 |   case ISD::ABDU: | 
 |     return APIntOps::umax(C1, C2) - APIntOps::umin(C1, C2); | 
 |   } | 
 |   return std::nullopt; | 
 | } | 
 |  | 
 | // Handle constant folding with UNDEF. | 
 | // TODO: Handle more cases. | 
 | static std::optional<APInt> FoldValueWithUndef(unsigned Opcode, const APInt &C1, | 
 |                                                bool IsUndef1, const APInt &C2, | 
 |                                                bool IsUndef2) { | 
 |   if (!(IsUndef1 || IsUndef2)) | 
 |     return FoldValue(Opcode, C1, C2); | 
 |  | 
 |   // Fold and(x, undef) -> 0 | 
 |   // Fold mul(x, undef) -> 0 | 
 |   if (Opcode == ISD::AND || Opcode == ISD::MUL) | 
 |     return APInt::getZero(C1.getBitWidth()); | 
 |  | 
 |   return std::nullopt; | 
 | } | 
 |  | 
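// Fold (add/sub GA, c) into a global address with an adjusted offset when the
// target allows offset folding; e.g. add(GlobalAddress(G, +8), 4) becomes
// GlobalAddress(G, +12).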
 | SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT, | 
 |                                        const GlobalAddressSDNode *GA, | 
 |                                        const SDNode *N2) { | 
 |   if (GA->getOpcode() != ISD::GlobalAddress) | 
 |     return SDValue(); | 
 |   if (!TLI->isOffsetFoldingLegal(GA)) | 
 |     return SDValue(); | 
 |   auto *C2 = dyn_cast<ConstantSDNode>(N2); | 
 |   if (!C2) | 
 |     return SDValue(); | 
 |   int64_t Offset = C2->getSExtValue(); | 
 |   switch (Opcode) { | 
 |   case ISD::ADD: break; | 
 |   case ISD::SUB: Offset = -uint64_t(Offset); break; | 
 |   default: return SDValue(); | 
 |   } | 
 |   return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT, | 
 |                           GA->getOffset() + uint64_t(Offset)); | 
 | } | 
 |  | 
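// Returns true if the whole operation is known to fold to UNDEF, currently a
// division or remainder whose divisor is zero/undef or a constant divisor
// vector containing a zero/undef element.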
 | bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) { | 
 |   switch (Opcode) { | 
 |   case ISD::SDIV: | 
 |   case ISD::UDIV: | 
 |   case ISD::SREM: | 
 |   case ISD::UREM: { | 
 |     // If a divisor is zero/undef or any element of a divisor vector is | 
 |     // zero/undef, the whole op is undef. | 
 |     assert(Ops.size() == 2 && "Div/rem should have 2 operands"); | 
 |     SDValue Divisor = Ops[1]; | 
 |     if (Divisor.isUndef() || isNullConstant(Divisor)) | 
 |       return true; | 
 |  | 
 |     return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) && | 
 |            llvm::any_of(Divisor->op_values(), | 
 |                         [](SDValue V) { return V.isUndef() || | 
 |                                         isNullConstant(V); }); | 
 |     // TODO: Handle signed overflow. | 
 |   } | 
 |   // TODO: Handle oversized shifts. | 
 |   default: | 
 |     return false; | 
 |   } | 
 | } | 
 |  | 
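// Attempt to constant fold Opcode applied to Ops: unary integer and FP
// constants, binary integer constants, global-address offsets, and
// element-wise folds of build/splat vector operands. Returns an empty
// SDValue on failure.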
 | SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, | 
 |                                              EVT VT, ArrayRef<SDValue> Ops) { | 
 |   // If the opcode is a target-specific ISD node, there's nothing we can | 
 |   // do here and the operand rules may not line up with the below, so | 
 |   // bail early. | 
 |   // We can't create a scalar CONCAT_VECTORS so skip it. It will break | 
 |   // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by | 
 |   // foldCONCAT_VECTORS in getNode before this is called. | 
 |   if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS) | 
 |     return SDValue(); | 
 |  | 
 |   unsigned NumOps = Ops.size(); | 
 |   if (NumOps == 0) | 
 |     return SDValue(); | 
 |  | 
 |   if (isUndef(Opcode, Ops)) | 
 |     return getUNDEF(VT); | 
 |  | 
 |   // Handle unary special cases. | 
 |   if (NumOps == 1) { | 
 |     SDValue N1 = Ops[0]; | 
 |  | 
 |     // Constant fold unary operations with an integer constant operand. Even | 
    // opaque constants will be folded, because the folding of unary operations
 |     // doesn't create new constants with different values. Nevertheless, the | 
 |     // opaque flag is preserved during folding to prevent future folding with | 
 |     // other constants. | 
 |     if (auto *C = dyn_cast<ConstantSDNode>(N1)) { | 
 |       const APInt &Val = C->getAPIntValue(); | 
 |       switch (Opcode) { | 
 |       case ISD::SIGN_EXTEND: | 
 |         return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, | 
 |                            C->isTargetOpcode(), C->isOpaque()); | 
 |       case ISD::TRUNCATE: | 
 |         if (C->isOpaque()) | 
 |           break; | 
 |         [[fallthrough]]; | 
 |       case ISD::ZERO_EXTEND: | 
 |         return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, | 
 |                            C->isTargetOpcode(), C->isOpaque()); | 
 |       case ISD::ANY_EXTEND: | 
 |         // Some targets like RISCV prefer to sign extend some types. | 
 |         if (TLI->isSExtCheaperThanZExt(N1.getValueType(), VT)) | 
 |           return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT, | 
 |                              C->isTargetOpcode(), C->isOpaque()); | 
 |         return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT, | 
 |                            C->isTargetOpcode(), C->isOpaque()); | 
 |       case ISD::ABS: | 
 |         return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(), | 
 |                            C->isOpaque()); | 
 |       case ISD::BITREVERSE: | 
 |         return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(), | 
 |                            C->isOpaque()); | 
 |       case ISD::BSWAP: | 
 |         return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(), | 
 |                            C->isOpaque()); | 
 |       case ISD::CTPOP: | 
 |         return getConstant(Val.popcount(), DL, VT, C->isTargetOpcode(), | 
 |                            C->isOpaque()); | 
 |       case ISD::CTLZ: | 
 |       case ISD::CTLZ_ZERO_UNDEF: | 
 |         return getConstant(Val.countl_zero(), DL, VT, C->isTargetOpcode(), | 
 |                            C->isOpaque()); | 
 |       case ISD::CTTZ: | 
 |       case ISD::CTTZ_ZERO_UNDEF: | 
 |         return getConstant(Val.countr_zero(), DL, VT, C->isTargetOpcode(), | 
 |                            C->isOpaque()); | 
 |       case ISD::UINT_TO_FP: | 
 |       case ISD::SINT_TO_FP: { | 
 |         APFloat apf(EVTToAPFloatSemantics(VT), | 
 |                     APInt::getZero(VT.getSizeInBits())); | 
 |         (void)apf.convertFromAPInt(Val, Opcode == ISD::SINT_TO_FP, | 
 |                                    APFloat::rmNearestTiesToEven); | 
 |         return getConstantFP(apf, DL, VT); | 
 |       } | 
 |       case ISD::FP16_TO_FP: | 
 |       case ISD::BF16_TO_FP: { | 
 |         bool Ignored; | 
 |         APFloat FPV(Opcode == ISD::FP16_TO_FP ? APFloat::IEEEhalf() | 
 |                                               : APFloat::BFloat(), | 
 |                     (Val.getBitWidth() == 16) ? Val : Val.trunc(16)); | 
 |  | 
 |         // This can return overflow, underflow, or inexact; we don't care. | 
 |         // FIXME need to be more flexible about rounding mode. | 
 |         (void)FPV.convert(EVTToAPFloatSemantics(VT), | 
 |                           APFloat::rmNearestTiesToEven, &Ignored); | 
 |         return getConstantFP(FPV, DL, VT); | 
 |       } | 
 |       case ISD::STEP_VECTOR: | 
 |         if (SDValue V = FoldSTEP_VECTOR(DL, VT, N1, *this)) | 
 |           return V; | 
 |         break; | 
 |       case ISD::BITCAST: | 
 |         if (VT == MVT::f16 && C->getValueType(0) == MVT::i16) | 
 |           return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT); | 
 |         if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) | 
 |           return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT); | 
 |         if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) | 
 |           return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT); | 
 |         if (VT == MVT::f128 && C->getValueType(0) == MVT::i128) | 
 |           return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT); | 
 |         break; | 
 |       } | 
 |     } | 
 |  | 
 |     // Constant fold unary operations with a floating point constant operand. | 
 |     if (auto *C = dyn_cast<ConstantFPSDNode>(N1)) { | 
 |       APFloat V = C->getValueAPF(); // make copy | 
 |       switch (Opcode) { | 
 |       case ISD::FNEG: | 
 |         V.changeSign(); | 
 |         return getConstantFP(V, DL, VT); | 
 |       case ISD::FABS: | 
 |         V.clearSign(); | 
 |         return getConstantFP(V, DL, VT); | 
 |       case ISD::FCEIL: { | 
 |         APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); | 
 |         if (fs == APFloat::opOK || fs == APFloat::opInexact) | 
 |           return getConstantFP(V, DL, VT); | 
 |         return SDValue(); | 
 |       } | 
 |       case ISD::FTRUNC: { | 
 |         APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); | 
 |         if (fs == APFloat::opOK || fs == APFloat::opInexact) | 
 |           return getConstantFP(V, DL, VT); | 
 |         return SDValue(); | 
 |       } | 
 |       case ISD::FFLOOR: { | 
 |         APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); | 
 |         if (fs == APFloat::opOK || fs == APFloat::opInexact) | 
 |           return getConstantFP(V, DL, VT); | 
 |         return SDValue(); | 
 |       } | 
 |       case ISD::FP_EXTEND: { | 
 |         bool ignored; | 
 |         // This can return overflow, underflow, or inexact; we don't care. | 
 |         // FIXME need to be more flexible about rounding mode. | 
 |         (void)V.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, | 
 |                         &ignored); | 
 |         return getConstantFP(V, DL, VT); | 
 |       } | 
 |       case ISD::FP_TO_SINT: | 
 |       case ISD::FP_TO_UINT: { | 
 |         bool ignored; | 
 |         APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT); | 
 |         // FIXME need to be more flexible about rounding mode. | 
 |         APFloat::opStatus s = | 
 |             V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored); | 
 |         if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual | 
 |           break; | 
 |         return getConstant(IntVal, DL, VT); | 
 |       } | 
 |       case ISD::FP_TO_FP16: | 
 |       case ISD::FP_TO_BF16: { | 
 |         bool Ignored; | 
 |         // This can return overflow, underflow, or inexact; we don't care. | 
 |         // FIXME need to be more flexible about rounding mode. | 
 |         (void)V.convert(Opcode == ISD::FP_TO_FP16 ? APFloat::IEEEhalf() | 
 |                                                   : APFloat::BFloat(), | 
 |                         APFloat::rmNearestTiesToEven, &Ignored); | 
 |         return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); | 
 |       } | 
 |       case ISD::BITCAST: | 
 |         if (VT == MVT::i16 && C->getValueType(0) == MVT::f16) | 
 |           return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, | 
 |                              VT); | 
 |         if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16) | 
 |           return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, | 
 |                              VT); | 
 |         if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) | 
 |           return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, | 
 |                              VT); | 
 |         if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) | 
 |           return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT); | 
 |         break; | 
 |       } | 
 |     } | 
 |  | 
 |     // Early-out if we failed to constant fold a bitcast. | 
 |     if (Opcode == ISD::BITCAST) | 
 |       return SDValue(); | 
 |   } | 
 |  | 
 |   // Handle binops special cases. | 
 |   if (NumOps == 2) { | 
 |     if (SDValue CFP = foldConstantFPMath(Opcode, DL, VT, Ops)) | 
 |       return CFP; | 
 |  | 
 |     if (auto *C1 = dyn_cast<ConstantSDNode>(Ops[0])) { | 
 |       if (auto *C2 = dyn_cast<ConstantSDNode>(Ops[1])) { | 
 |         if (C1->isOpaque() || C2->isOpaque()) | 
 |           return SDValue(); | 
 |  | 
 |         std::optional<APInt> FoldAttempt = | 
 |             FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()); | 
 |         if (!FoldAttempt) | 
 |           return SDValue(); | 
 |  | 
 |         SDValue Folded = getConstant(*FoldAttempt, DL, VT); | 
 |         assert((!Folded || !VT.isVector()) && | 
 |                "Can't fold vectors ops with scalar operands"); | 
 |         return Folded; | 
 |       } | 
 |     } | 
 |  | 
 |     // fold (add Sym, c) -> Sym+c | 
 |     if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[0])) | 
 |       return FoldSymbolOffset(Opcode, VT, GA, Ops[1].getNode()); | 
 |     if (TLI->isCommutativeBinOp(Opcode)) | 
 |       if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ops[1])) | 
 |         return FoldSymbolOffset(Opcode, VT, GA, Ops[0].getNode()); | 
 |   } | 
 |  | 
 |   // This is for vector folding only from here on. | 
 |   if (!VT.isVector()) | 
 |     return SDValue(); | 
 |  | 
 |   ElementCount NumElts = VT.getVectorElementCount(); | 
 |  | 
 |   // See if we can fold through bitcasted integer ops. | 
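  // e.g. with VT = v2i32 and both operands bitcast from v4i16 build vectors,
  // regroup the raw constant bits into two 32-bit lanes, fold those, then
  // recast the result back to four 16-bit lanes and bitcast it to v2i32.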
 |   if (NumOps == 2 && VT.isFixedLengthVector() && VT.isInteger() && | 
 |       Ops[0].getValueType() == VT && Ops[1].getValueType() == VT && | 
 |       Ops[0].getOpcode() == ISD::BITCAST && | 
 |       Ops[1].getOpcode() == ISD::BITCAST) { | 
 |     SDValue N1 = peekThroughBitcasts(Ops[0]); | 
 |     SDValue N2 = peekThroughBitcasts(Ops[1]); | 
 |     auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); | 
 |     auto *BV2 = dyn_cast<BuildVectorSDNode>(N2); | 
 |     EVT BVVT = N1.getValueType(); | 
 |     if (BV1 && BV2 && BVVT.isInteger() && BVVT == N2.getValueType()) { | 
 |       bool IsLE = getDataLayout().isLittleEndian(); | 
 |       unsigned EltBits = VT.getScalarSizeInBits(); | 
 |       SmallVector<APInt> RawBits1, RawBits2; | 
 |       BitVector UndefElts1, UndefElts2; | 
 |       if (BV1->getConstantRawBits(IsLE, EltBits, RawBits1, UndefElts1) && | 
 |           BV2->getConstantRawBits(IsLE, EltBits, RawBits2, UndefElts2)) { | 
 |         SmallVector<APInt> RawBits; | 
 |         for (unsigned I = 0, E = NumElts.getFixedValue(); I != E; ++I) { | 
 |           std::optional<APInt> Fold = FoldValueWithUndef( | 
 |               Opcode, RawBits1[I], UndefElts1[I], RawBits2[I], UndefElts2[I]); | 
 |           if (!Fold) | 
 |             break; | 
 |           RawBits.push_back(*Fold); | 
 |         } | 
 |         if (RawBits.size() == NumElts.getFixedValue()) { | 
          // We have constant folded, but we need to cast the result back to
          // the original (possibly legalized) type.
 |           SmallVector<APInt> DstBits; | 
 |           BitVector DstUndefs; | 
 |           BuildVectorSDNode::recastRawBits(IsLE, BVVT.getScalarSizeInBits(), | 
 |                                            DstBits, RawBits, DstUndefs, | 
 |                                            BitVector(RawBits.size(), false)); | 
 |           EVT BVEltVT = BV1->getOperand(0).getValueType(); | 
 |           unsigned BVEltBits = BVEltVT.getSizeInBits(); | 
 |           SmallVector<SDValue> Ops(DstBits.size(), getUNDEF(BVEltVT)); | 
 |           for (unsigned I = 0, E = DstBits.size(); I != E; ++I) { | 
 |             if (DstUndefs[I]) | 
 |               continue; | 
 |             Ops[I] = getConstant(DstBits[I].sext(BVEltBits), DL, BVEltVT); | 
 |           } | 
 |           return getBitcast(VT, getBuildVector(BVVT, DL, Ops)); | 
 |         } | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
  // Fold (mul step_vector(C0), C1) -> (step_vector(C0 * C1))
  //      (shl step_vector(C0), C1) -> (step_vector(C0 << C1))
 |   if ((Opcode == ISD::MUL || Opcode == ISD::SHL) && | 
 |       Ops[0].getOpcode() == ISD::STEP_VECTOR) { | 
 |     APInt RHSVal; | 
 |     if (ISD::isConstantSplatVector(Ops[1].getNode(), RHSVal)) { | 
 |       APInt NewStep = Opcode == ISD::MUL | 
 |                           ? Ops[0].getConstantOperandAPInt(0) * RHSVal | 
 |                           : Ops[0].getConstantOperandAPInt(0) << RHSVal; | 
 |       return getStepVector(DL, VT, NewStep); | 
 |     } | 
 |   } | 
 |  | 
 |   auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) { | 
 |     return !Op.getValueType().isVector() || | 
 |            Op.getValueType().getVectorElementCount() == NumElts; | 
 |   }; | 
 |  | 
 |   auto IsBuildVectorSplatVectorOrUndef = [](const SDValue &Op) { | 
 |     return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE || | 
 |            Op.getOpcode() == ISD::BUILD_VECTOR || | 
 |            Op.getOpcode() == ISD::SPLAT_VECTOR; | 
 |   }; | 
 |  | 
  // Each operand must be UNDEF, a CONDCODE, or a build/splat vector, and any
  // vector operand must have the same number of elements as the result type.
 |   if (!llvm::all_of(Ops, IsBuildVectorSplatVectorOrUndef) || | 
 |       !llvm::all_of(Ops, IsScalarOrSameVectorSize)) | 
 |     return SDValue(); | 
 |  | 
  // If we are comparing vectors, then the result needs to be an i1 boolean
  // that is then extended back to the legal result type depending on how
  // booleans are represented.
 |   EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType()); | 
 |   ISD::NodeType ExtendCode = | 
 |       (Opcode == ISD::SETCC && SVT != VT.getScalarType()) | 
 |           ? TargetLowering::getExtendForContent(TLI->getBooleanContents(VT)) | 
 |           : ISD::SIGN_EXTEND; | 
 |  | 
  // Find a legal integer scalar type for constant promotion and ensure that
  // its scalar size is at least as large as the source's.
 |   EVT LegalSVT = VT.getScalarType(); | 
 |   if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) { | 
 |     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT); | 
 |     if (LegalSVT.bitsLT(VT.getScalarType())) | 
 |       return SDValue(); | 
 |   } | 
 |  | 
 |   // For scalable vector types we know we're dealing with SPLAT_VECTORs. We | 
 |   // only have one operand to check. For fixed-length vector types we may have | 
 |   // a combination of BUILD_VECTOR and SPLAT_VECTOR. | 
 |   unsigned NumVectorElts = NumElts.isScalable() ? 1 : NumElts.getFixedValue(); | 
 |  | 
 |   // Constant fold each scalar lane separately. | 
 |   SmallVector<SDValue, 4> ScalarResults; | 
 |   for (unsigned I = 0; I != NumVectorElts; I++) { | 
 |     SmallVector<SDValue, 4> ScalarOps; | 
 |     for (SDValue Op : Ops) { | 
 |       EVT InSVT = Op.getValueType().getScalarType(); | 
 |       if (Op.getOpcode() != ISD::BUILD_VECTOR && | 
 |           Op.getOpcode() != ISD::SPLAT_VECTOR) { | 
 |         if (Op.isUndef()) | 
 |           ScalarOps.push_back(getUNDEF(InSVT)); | 
 |         else | 
 |           ScalarOps.push_back(Op); | 
 |         continue; | 
 |       } | 
 |  | 
 |       SDValue ScalarOp = | 
 |           Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I); | 
 |       EVT ScalarVT = ScalarOp.getValueType(); | 
 |  | 
 |       // Build vector (integer) scalar operands may need implicit | 
 |       // truncation - do this before constant folding. | 
 |       if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT)) { | 
 |         // Don't create illegally-typed nodes unless they're constants or undef | 
 |         // - if we fail to constant fold we can't guarantee the (dead) nodes | 
 |         // we're creating will be cleaned up before being visited for | 
 |         // legalization. | 
 |         if (NewNodesMustHaveLegalTypes && !ScalarOp.isUndef() && | 
 |             !isa<ConstantSDNode>(ScalarOp) && | 
 |             TLI->getTypeAction(*getContext(), InSVT) != | 
 |                 TargetLowering::TypeLegal) | 
 |           return SDValue(); | 
 |         ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp); | 
 |       } | 
 |  | 
 |       ScalarOps.push_back(ScalarOp); | 
 |     } | 
 |  | 
 |     // Constant fold the scalar operands. | 
 |     SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps); | 
 |  | 
 |     // Legalize the (integer) scalar constant if necessary. | 
 |     if (LegalSVT != SVT) | 
 |       ScalarResult = getNode(ExtendCode, DL, LegalSVT, ScalarResult); | 
 |  | 
 |     // Scalar folding only succeeded if the result is a constant or UNDEF. | 
 |     if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant && | 
 |         ScalarResult.getOpcode() != ISD::ConstantFP) | 
 |       return SDValue(); | 
 |     ScalarResults.push_back(ScalarResult); | 
 |   } | 
 |  | 
 |   SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0]) | 
 |                                    : getBuildVector(VT, DL, ScalarResults); | 
 |   NewSDValueDbgMsg(V, "New node fold constant vector: ", this); | 
 |   return V; | 
 | } | 
 |  | 
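// Constant fold a binary FP operation on constant (or splat-constant)
// operands using the default rounding mode, plus the UNDEF special cases
// that match the IR optimizer's behavior.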
 | SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL, | 
 |                                          EVT VT, ArrayRef<SDValue> Ops) { | 
 |   // TODO: Add support for unary/ternary fp opcodes. | 
 |   if (Ops.size() != 2) | 
 |     return SDValue(); | 
 |  | 
 |   // TODO: We don't do any constant folding for strict FP opcodes here, but we | 
 |   //       should. That will require dealing with a potentially non-default | 
 |   //       rounding mode, checking the "opStatus" return value from the APFloat | 
 |   //       math calculations, and possibly other variations. | 
 |   SDValue N1 = Ops[0]; | 
 |   SDValue N2 = Ops[1]; | 
 |   ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1, /*AllowUndefs*/ false); | 
 |   ConstantFPSDNode *N2CFP = isConstOrConstSplatFP(N2, /*AllowUndefs*/ false); | 
 |   if (N1CFP && N2CFP) { | 
 |     APFloat C1 = N1CFP->getValueAPF(); // make copy | 
 |     const APFloat &C2 = N2CFP->getValueAPF(); | 
 |     switch (Opcode) { | 
 |     case ISD::FADD: | 
 |       C1.add(C2, APFloat::rmNearestTiesToEven); | 
 |       return getConstantFP(C1, DL, VT); | 
 |     case ISD::FSUB: | 
 |       C1.subtract(C2, APFloat::rmNearestTiesToEven); | 
 |       return getConstantFP(C1, DL, VT); | 
 |     case ISD::FMUL: | 
 |       C1.multiply(C2, APFloat::rmNearestTiesToEven); | 
 |       return getConstantFP(C1, DL, VT); | 
 |     case ISD::FDIV: | 
 |       C1.divide(C2, APFloat::rmNearestTiesToEven); | 
 |       return getConstantFP(C1, DL, VT); | 
 |     case ISD::FREM: | 
 |       C1.mod(C2); | 
 |       return getConstantFP(C1, DL, VT); | 
 |     case ISD::FCOPYSIGN: | 
 |       C1.copySign(C2); | 
 |       return getConstantFP(C1, DL, VT); | 
 |     case ISD::FMINNUM: | 
 |       return getConstantFP(minnum(C1, C2), DL, VT); | 
 |     case ISD::FMAXNUM: | 
 |       return getConstantFP(maxnum(C1, C2), DL, VT); | 
 |     case ISD::FMINIMUM: | 
 |       return getConstantFP(minimum(C1, C2), DL, VT); | 
 |     case ISD::FMAXIMUM: | 
 |       return getConstantFP(maximum(C1, C2), DL, VT); | 
 |     default: break; | 
 |     } | 
 |   } | 
 |   if (N1CFP && Opcode == ISD::FP_ROUND) { | 
 |     APFloat C1 = N1CFP->getValueAPF();    // make copy | 
 |     bool Unused; | 
 |     // This can return overflow, underflow, or inexact; we don't care. | 
 |     // FIXME need to be more flexible about rounding mode. | 
 |     (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven, | 
 |                       &Unused); | 
 |     return getConstantFP(C1, DL, VT); | 
 |   } | 
 |  | 
 |   switch (Opcode) { | 
 |   case ISD::FSUB: | 
 |     // -0.0 - undef --> undef (consistent with "fneg undef") | 
 |     if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1, /*AllowUndefs*/ true)) | 
 |       if (N1C && N1C->getValueAPF().isNegZero() && N2.isUndef()) | 
 |         return getUNDEF(VT); | 
 |     [[fallthrough]]; | 
 |  | 
 |   case ISD::FADD: | 
 |   case ISD::FMUL: | 
 |   case ISD::FDIV: | 
 |   case ISD::FREM: | 
    // If both operands are undef, the result is undef. If one operand is
    // undef, the result is NaN. This should match the behavior of the IR
    // optimizer.
 |     if (N1.isUndef() && N2.isUndef()) | 
 |       return getUNDEF(VT); | 
 |     if (N1.isUndef() || N2.isUndef()) | 
 |       return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT); | 
 |   } | 
 |   return SDValue(); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) { | 
 |   assert(Val.getValueType().isInteger() && "Invalid AssertAlign!"); | 
 |  | 
 |   // There's no need to assert on a byte-aligned pointer. All pointers are at | 
 |   // least byte aligned. | 
 |   if (A == Align(1)) | 
 |     return Val; | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val}); | 
 |   ID.AddInteger(A.value()); | 
 |  | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(), | 
 |                                          Val.getValueType(), A); | 
 |   createOperands(N, {Val}); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |  | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1, SDValue N2) { | 
 |   SDNodeFlags Flags; | 
 |   if (Inserter) | 
 |     Flags = Inserter->getFlags(); | 
 |   return getNode(Opcode, DL, VT, N1, N2, Flags); | 
 | } | 
 |  | 
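// Canonicalize the operand order of a commutative binop: a constant operand
// is moved to the right-hand side and a step_vector ahead of a splat, so
// later folds only need to match one form.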
 | void SelectionDAG::canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1, | 
 |                                                 SDValue &N2) const { | 
 |   if (!TLI->isCommutativeBinOp(Opcode)) | 
 |     return; | 
 |  | 
 |   // Canonicalize: | 
 |   //   binop(const, nonconst) -> binop(nonconst, const) | 
 |   SDNode *N1C = isConstantIntBuildVectorOrConstantInt(N1); | 
 |   SDNode *N2C = isConstantIntBuildVectorOrConstantInt(N2); | 
 |   SDNode *N1CFP = isConstantFPBuildVectorOrConstantFP(N1); | 
 |   SDNode *N2CFP = isConstantFPBuildVectorOrConstantFP(N2); | 
 |   if ((N1C && !N2C) || (N1CFP && !N2CFP)) | 
 |     std::swap(N1, N2); | 
 |  | 
 |   // Canonicalize: | 
 |   //  binop(splat(x), step_vector) -> binop(step_vector, splat(x)) | 
 |   else if (N1.getOpcode() == ISD::SPLAT_VECTOR && | 
 |            N2.getOpcode() == ISD::STEP_VECTOR) | 
 |     std::swap(N1, N2); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1, SDValue N2, const SDNodeFlags Flags) { | 
 |   assert(N1.getOpcode() != ISD::DELETED_NODE && | 
 |          N2.getOpcode() != ISD::DELETED_NODE && | 
 |          "Operand is DELETED_NODE!"); | 
 |  | 
 |   canonicalizeCommutativeBinop(Opcode, N1, N2); | 
 |  | 
 |   auto *N1C = dyn_cast<ConstantSDNode>(N1); | 
 |   auto *N2C = dyn_cast<ConstantSDNode>(N2); | 
 |  | 
  // Don't allow undefs in vector splats; we might be returning N2 when folding
  // to zero, etc.
 |   ConstantSDNode *N2CV = | 
 |       isConstOrConstSplat(N2, /*AllowUndefs*/ false, /*AllowTruncation*/ true); | 
 |  | 
 |   switch (Opcode) { | 
 |   default: break; | 
 |   case ISD::TokenFactor: | 
 |     assert(VT == MVT::Other && N1.getValueType() == MVT::Other && | 
 |            N2.getValueType() == MVT::Other && "Invalid token factor!"); | 
 |     // Fold trivial token factors. | 
 |     if (N1.getOpcode() == ISD::EntryToken) return N2; | 
 |     if (N2.getOpcode() == ISD::EntryToken) return N1; | 
 |     if (N1 == N2) return N1; | 
 |     break; | 
 |   case ISD::BUILD_VECTOR: { | 
 |     // Attempt to simplify BUILD_VECTOR. | 
 |     SDValue Ops[] = {N1, N2}; | 
 |     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) | 
 |       return V; | 
 |     break; | 
 |   } | 
 |   case ISD::CONCAT_VECTORS: { | 
 |     SDValue Ops[] = {N1, N2}; | 
 |     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) | 
 |       return V; | 
 |     break; | 
 |   } | 
 |   case ISD::AND: | 
 |     assert(VT.isInteger() && "This operator does not apply to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so it's | 
 |     // worth handling here. | 
 |     if (N2CV && N2CV->isZero()) | 
 |       return N2; | 
 |     if (N2CV && N2CV->isAllOnes()) // X & -1 -> X | 
 |       return N1; | 
 |     break; | 
 |   case ISD::OR: | 
 |   case ISD::XOR: | 
 |   case ISD::ADD: | 
 |   case ISD::SUB: | 
 |     assert(VT.isInteger() && "This operator does not apply to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so | 
 |     // it's worth handling here. | 
 |     if (N2CV && N2CV->isZero()) | 
 |       return N1; | 
 |     if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() && | 
 |         VT.getVectorElementType() == MVT::i1) | 
 |       return getNode(ISD::XOR, DL, VT, N1, N2); | 
 |     break; | 
 |   case ISD::MUL: | 
 |     assert(VT.isInteger() && "This operator does not apply to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     if (VT.isVector() && VT.getVectorElementType() == MVT::i1) | 
 |       return getNode(ISD::AND, DL, VT, N1, N2); | 
 |     if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { | 
 |       const APInt &MulImm = N1->getConstantOperandAPInt(0); | 
 |       const APInt &N2CImm = N2C->getAPIntValue(); | 
 |       return getVScale(DL, VT, MulImm * N2CImm); | 
 |     } | 
 |     break; | 
 |   case ISD::UDIV: | 
 |   case ISD::UREM: | 
 |   case ISD::MULHU: | 
 |   case ISD::MULHS: | 
 |   case ISD::SDIV: | 
 |   case ISD::SREM: | 
 |   case ISD::SADDSAT: | 
 |   case ISD::SSUBSAT: | 
 |   case ISD::UADDSAT: | 
 |   case ISD::USUBSAT: | 
 |     assert(VT.isInteger() && "This operator does not apply to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     if (VT.isVector() && VT.getVectorElementType() == MVT::i1) { | 
 |       // fold (add_sat x, y) -> (or x, y) for bool types. | 
 |       if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT) | 
 |         return getNode(ISD::OR, DL, VT, N1, N2); | 
 |       // fold (sub_sat x, y) -> (and x, ~y) for bool types. | 
 |       if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT) | 
 |         return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT)); | 
 |     } | 
 |     break; | 
 |   case ISD::ABDS: | 
 |   case ISD::ABDU: | 
 |     assert(VT.isInteger() && "This operator does not apply to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     break; | 
 |   case ISD::SMIN: | 
 |   case ISD::UMAX: | 
 |     assert(VT.isInteger() && "This operator does not apply to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     if (VT.isVector() && VT.getVectorElementType() == MVT::i1) | 
 |       return getNode(ISD::OR, DL, VT, N1, N2); | 
 |     break; | 
 |   case ISD::SMAX: | 
 |   case ISD::UMIN: | 
 |     assert(VT.isInteger() && "This operator does not apply to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     if (VT.isVector() && VT.getVectorElementType() == MVT::i1) | 
 |       return getNode(ISD::AND, DL, VT, N1, N2); | 
 |     break; | 
 |   case ISD::FADD: | 
 |   case ISD::FSUB: | 
 |   case ISD::FMUL: | 
 |   case ISD::FDIV: | 
 |   case ISD::FREM: | 
 |     assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            N1.getValueType() == VT && "Binary operator types must match!"); | 
 |     if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags)) | 
 |       return V; | 
 |     break; | 
 |   case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match. | 
 |     assert(N1.getValueType() == VT && | 
 |            N1.getValueType().isFloatingPoint() && | 
 |            N2.getValueType().isFloatingPoint() && | 
 |            "Invalid FCOPYSIGN!"); | 
 |     break; | 
 |   case ISD::SHL: | 
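    // Fold (shl (vscale * C1), C2) -> (vscale * (C1 << C2)) when no signed
    // wrap is guaranteed, e.g. (shl (vscale * 4), 1) -> (vscale * 8).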
 |     if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) { | 
 |       const APInt &MulImm = N1->getConstantOperandAPInt(0); | 
 |       const APInt &ShiftImm = N2C->getAPIntValue(); | 
 |       return getVScale(DL, VT, MulImm << ShiftImm); | 
 |     } | 
 |     [[fallthrough]]; | 
 |   case ISD::SRA: | 
 |   case ISD::SRL: | 
 |     if (SDValue V = simplifyShift(N1, N2)) | 
 |       return V; | 
 |     [[fallthrough]]; | 
 |   case ISD::ROTL: | 
 |   case ISD::ROTR: | 
 |     assert(VT == N1.getValueType() && | 
 |            "Shift operators return type must be the same as their first arg"); | 
 |     assert(VT.isInteger() && N2.getValueType().isInteger() && | 
 |            "Shifts only work on integers"); | 
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
 |     // Verify that the shift amount VT is big enough to hold valid shift | 
 |     // amounts.  This catches things like trying to shift an i1024 value by an | 
 |     // i8, which is easy to fall into in generic code that uses | 
    // TLI.getShiftAmountTy().
 |     assert(N2.getValueType().getScalarSizeInBits() >= | 
 |                Log2_32_Ceil(VT.getScalarSizeInBits()) && | 
 |            "Invalid use of small shift amount with oversized value!"); | 
 |  | 
 |     // Always fold shifts of i1 values so the code generator doesn't need to | 
 |     // handle them.  Since we know the size of the shift has to be less than the | 
 |     // size of the value, the shift/rotate count is guaranteed to be zero. | 
 |     if (VT == MVT::i1) | 
 |       return N1; | 
 |     if (N2CV && N2CV->isZero()) | 
 |       return N1; | 
 |     break; | 
 |   case ISD::FP_ROUND: | 
 |     assert(VT.isFloatingPoint() && | 
 |            N1.getValueType().isFloatingPoint() && | 
 |            VT.bitsLE(N1.getValueType()) && | 
 |            N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) && | 
 |            "Invalid FP_ROUND!"); | 
 |     if (N1.getValueType() == VT) return N1;  // noop conversion. | 
 |     break; | 
 |   case ISD::AssertSext: | 
 |   case ISD::AssertZext: { | 
 |     EVT EVT = cast<VTSDNode>(N2)->getVT(); | 
 |     assert(VT == N1.getValueType() && "Not an inreg extend!"); | 
 |     assert(VT.isInteger() && EVT.isInteger() && | 
 |            "Cannot *_EXTEND_INREG FP types"); | 
 |     assert(!EVT.isVector() && | 
 |            "AssertSExt/AssertZExt type should be the vector element type " | 
 |            "rather than the vector type!"); | 
 |     assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!"); | 
 |     if (VT.getScalarType() == EVT) return N1; // noop assertion. | 
 |     break; | 
 |   } | 
 |   case ISD::SIGN_EXTEND_INREG: { | 
 |     EVT EVT = cast<VTSDNode>(N2)->getVT(); | 
 |     assert(VT == N1.getValueType() && "Not an inreg extend!"); | 
 |     assert(VT.isInteger() && EVT.isInteger() && | 
 |            "Cannot *_EXTEND_INREG FP types"); | 
 |     assert(EVT.isVector() == VT.isVector() && | 
 |            "SIGN_EXTEND_INREG type should be vector iff the operand " | 
 |            "type is vector!"); | 
 |     assert((!EVT.isVector() || | 
 |             EVT.getVectorElementCount() == VT.getVectorElementCount()) && | 
 |            "Vector element counts must match in SIGN_EXTEND_INREG"); | 
 |     assert(EVT.bitsLE(VT) && "Not extending!"); | 
 |     if (EVT == VT) return N1;  // Not actually extending | 
 |  | 
 |     auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) { | 
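      // Sign extend the low FromBits bits: shift them into the top of the
      // value, then arithmetic shift right to replicate the sign bit.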
 |       unsigned FromBits = EVT.getScalarSizeInBits(); | 
 |       Val <<= Val.getBitWidth() - FromBits; | 
 |       Val.ashrInPlace(Val.getBitWidth() - FromBits); | 
 |       return getConstant(Val, DL, ConstantVT); | 
 |     }; | 
 |  | 
 |     if (N1C) { | 
 |       const APInt &Val = N1C->getAPIntValue(); | 
 |       return SignExtendInReg(Val, VT); | 
 |     } | 
 |  | 
 |     if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) { | 
 |       SmallVector<SDValue, 8> Ops; | 
 |       llvm::EVT OpVT = N1.getOperand(0).getValueType(); | 
 |       for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) { | 
 |         SDValue Op = N1.getOperand(i); | 
 |         if (Op.isUndef()) { | 
 |           Ops.push_back(getUNDEF(OpVT)); | 
 |           continue; | 
 |         } | 
 |         ConstantSDNode *C = cast<ConstantSDNode>(Op); | 
 |         APInt Val = C->getAPIntValue(); | 
 |         Ops.push_back(SignExtendInReg(Val, OpVT)); | 
 |       } | 
 |       return getBuildVector(VT, DL, Ops); | 
 |     } | 
 |  | 
 |     if (N1.getOpcode() == ISD::SPLAT_VECTOR && | 
 |         isa<ConstantSDNode>(N1.getOperand(0))) | 
 |       return getNode( | 
 |           ISD::SPLAT_VECTOR, DL, VT, | 
 |           SignExtendInReg(N1.getConstantOperandAPInt(0), | 
 |                           N1.getOperand(0).getValueType())); | 
 |     break; | 
 |   } | 
 |   case ISD::FP_TO_SINT_SAT: | 
 |   case ISD::FP_TO_UINT_SAT: { | 
 |     assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() && | 
 |            N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT"); | 
 |     assert(N1.getValueType().isVector() == VT.isVector() && | 
 |            "FP_TO_*INT_SAT type should be vector iff the operand type is " | 
 |            "vector!"); | 
 |     assert((!VT.isVector() || VT.getVectorElementCount() == | 
 |                                   N1.getValueType().getVectorElementCount()) && | 
 |            "Vector element counts must match in FP_TO_*INT_SAT"); | 
 |     assert(!cast<VTSDNode>(N2)->getVT().isVector() && | 
 |            "Type to saturate to must be a scalar."); | 
 |     assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) && | 
 |            "Not extending!"); | 
 |     break; | 
 |   } | 
 |   case ISD::EXTRACT_VECTOR_ELT: | 
    assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
           "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
           "element type of the vector.");
 |  | 
 |     // Extract from an undefined value or using an undefined index is undefined. | 
 |     if (N1.isUndef() || N2.isUndef()) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length | 
 |     // vectors. For scalable vectors we will provide appropriate support for | 
 |     // dealing with arbitrary indices. | 
 |     if (N2C && N1.getValueType().isFixedLengthVector() && | 
 |         N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements())) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is | 
 |     // expanding copies of large vectors from registers. This only works for | 
 |     // fixed length vectors, since we need to know the exact number of | 
 |     // elements. | 
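    // e.g. (extract_elt (concat v4i32:A, v4i32:B), 6) -> (extract_elt B, 2).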
 |     if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && | 
 |         N1.getOperand(0).getValueType().isFixedLengthVector()) { | 
 |       unsigned Factor = | 
 |         N1.getOperand(0).getValueType().getVectorNumElements(); | 
 |       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, | 
 |                      N1.getOperand(N2C->getZExtValue() / Factor), | 
 |                      getVectorIdxConstant(N2C->getZExtValue() % Factor, DL)); | 
 |     } | 
 |  | 
 |     // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while | 
 |     // lowering is expanding large vector constants. | 
 |     if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR || | 
 |                 N1.getOpcode() == ISD::SPLAT_VECTOR)) { | 
 |       assert((N1.getOpcode() != ISD::BUILD_VECTOR || | 
 |               N1.getValueType().isFixedLengthVector()) && | 
 |              "BUILD_VECTOR used for scalable vectors"); | 
 |       unsigned Index = | 
 |           N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0; | 
 |       SDValue Elt = N1.getOperand(Index); | 
 |  | 
 |       if (VT != Elt.getValueType()) | 
 |         // If the vector element type is not legal, the BUILD_VECTOR operands | 
 |         // are promoted and implicitly truncated, and the result implicitly | 
 |         // extended. Make that explicit here. | 
 |         Elt = getAnyExtOrTrunc(Elt, DL, VT); | 
 |  | 
 |       return Elt; | 
 |     } | 
 |  | 
 |     // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector | 
 |     // operations are lowered to scalars. | 
 |     if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) { | 
 |       // If the indices are the same, return the inserted element else | 
 |       // if the indices are known different, extract the element from | 
 |       // the original vector. | 
 |       SDValue N1Op2 = N1.getOperand(2); | 
 |       ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2); | 
 |  | 
 |       if (N1Op2C && N2C) { | 
 |         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) { | 
 |           if (VT == N1.getOperand(1).getValueType()) | 
 |             return N1.getOperand(1); | 
 |           if (VT.isFloatingPoint()) { | 
            assert(VT.getSizeInBits() >
                   N1.getOperand(1).getValueType().getSizeInBits());
 |             return getFPExtendOrRound(N1.getOperand(1), DL, VT); | 
 |           } | 
 |           return getSExtOrTrunc(N1.getOperand(1), DL, VT); | 
 |         } | 
 |         return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2); | 
 |       } | 
 |     } | 
 |  | 
 |     // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed | 
 |     // when vector types are scalarized and v1iX is legal. | 
 |     // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx). | 
 |     // Here we are completely ignoring the extract element index (N2), | 
 |     // which is fine for fixed width vectors, since any index other than 0 | 
 |     // is undefined anyway. However, this cannot be ignored for scalable | 
    // vectors - in theory we could support this, but we don't want to do so
    // without a profitability check.
 |     if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && | 
 |         N1.getValueType().isFixedLengthVector() && | 
 |         N1.getValueType().getVectorNumElements() == 1) { | 
 |       return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), | 
 |                      N1.getOperand(1)); | 
 |     } | 
 |     break; | 
 |   case ISD::EXTRACT_ELEMENT: | 
 |     assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!"); | 
 |     assert(!N1.getValueType().isVector() && !VT.isVector() && | 
 |            (N1.getValueType().isInteger() == VT.isInteger()) && | 
 |            N1.getValueType() != VT && | 
 |            "Wrong types for EXTRACT_ELEMENT!"); | 
 |  | 
 |     // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding | 
 |     // 64-bit integers into 32-bit parts.  Instead of building the extract of | 
 |     // the BUILD_PAIR, only to have legalize rip it apart, just do it now. | 
 |     if (N1.getOpcode() == ISD::BUILD_PAIR) | 
 |       return N1.getOperand(N2C->getZExtValue()); | 
 |  | 
 |     // EXTRACT_ELEMENT of a constant int is also very common. | 
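    // e.g. extracting element 1 of an i64 constant as i32 yields bits
    // [63:32], i.e. the high half of the constant.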
 |     if (N1C) { | 
 |       unsigned ElementSize = VT.getSizeInBits(); | 
 |       unsigned Shift = ElementSize * N2C->getZExtValue(); | 
 |       const APInt &Val = N1C->getAPIntValue(); | 
 |       return getConstant(Val.extractBits(ElementSize, Shift), DL, VT); | 
 |     } | 
 |     break; | 
 |   case ISD::EXTRACT_SUBVECTOR: { | 
 |     EVT N1VT = N1.getValueType(); | 
 |     assert(VT.isVector() && N1VT.isVector() && | 
 |            "Extract subvector VTs must be vectors!"); | 
 |     assert(VT.getVectorElementType() == N1VT.getVectorElementType() && | 
 |            "Extract subvector VTs must have the same element type!"); | 
 |     assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) && | 
 |            "Cannot extract a scalable vector from a fixed length vector!"); | 
 |     assert((VT.isScalableVector() != N1VT.isScalableVector() || | 
 |             VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) && | 
 |            "Extract subvector must be from larger vector to smaller vector!"); | 
 |     assert(N2C && "Extract subvector index must be a constant"); | 
 |     assert((VT.isScalableVector() != N1VT.isScalableVector() || | 
 |             (VT.getVectorMinNumElements() + N2C->getZExtValue()) <= | 
 |                 N1VT.getVectorMinNumElements()) && | 
 |            "Extract subvector overflow!"); | 
 |     assert(N2C->getAPIntValue().getBitWidth() == | 
 |                TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() && | 
 |            "Constant index for EXTRACT_SUBVECTOR has an invalid size"); | 
 |  | 
 |     // Trivial extraction. | 
 |     if (VT == N1VT) | 
 |       return N1; | 
 |  | 
 |     // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF. | 
 |     if (N1.isUndef()) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of | 
 |     // the concat have the same type as the extract. | 
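    // e.g. (extract_subvector (concat v2i64:A, v2i64:B), 2) -> B.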
 |     if (N1.getOpcode() == ISD::CONCAT_VECTORS && | 
 |         VT == N1.getOperand(0).getValueType()) { | 
 |       unsigned Factor = VT.getVectorMinNumElements(); | 
 |       return N1.getOperand(N2C->getZExtValue() / Factor); | 
 |     } | 
 |  | 
 |     // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created | 
 |     // during shuffle legalization. | 
 |     if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) && | 
 |         VT == N1.getOperand(1).getValueType()) | 
 |       return N1.getOperand(1); | 
 |     break; | 
 |   } | 
 |   } | 
 |  | 
 |   // Perform trivial constant folding. | 
 |   if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2})) | 
 |     return SV; | 
 |  | 
 |   // Canonicalize an UNDEF to the RHS, even over a constant. | 
 |   if (N1.isUndef()) { | 
 |     if (TLI->isCommutativeBinOp(Opcode)) { | 
 |       std::swap(N1, N2); | 
 |     } else { | 
 |       switch (Opcode) { | 
 |       case ISD::SUB: | 
 |         return getUNDEF(VT);     // fold op(undef, arg2) -> undef | 
 |       case ISD::SIGN_EXTEND_INREG: | 
 |       case ISD::UDIV: | 
 |       case ISD::SDIV: | 
 |       case ISD::UREM: | 
 |       case ISD::SREM: | 
 |       case ISD::SSUBSAT: | 
 |       case ISD::USUBSAT: | 
 |         return getConstant(0, DL, VT);    // fold op(undef, arg2) -> 0 | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   // Fold a bunch of operators when the RHS is undef. | 
 |   if (N2.isUndef()) { | 
 |     switch (Opcode) { | 
 |     case ISD::XOR: | 
 |       if (N1.isUndef()) | 
 |         // Handle undef ^ undef -> 0 special case. This is a common | 
 |         // idiom (misuse). | 
 |         return getConstant(0, DL, VT); | 
 |       [[fallthrough]]; | 
 |     case ISD::ADD: | 
 |     case ISD::SUB: | 
 |     case ISD::UDIV: | 
 |     case ISD::SDIV: | 
 |     case ISD::UREM: | 
 |     case ISD::SREM: | 
 |       return getUNDEF(VT);       // fold op(arg1, undef) -> undef | 
 |     case ISD::MUL: | 
 |     case ISD::AND: | 
 |     case ISD::SSUBSAT: | 
 |     case ISD::USUBSAT: | 
 |       return getConstant(0, DL, VT);  // fold op(arg1, undef) -> 0 | 
 |     case ISD::OR: | 
 |     case ISD::SADDSAT: | 
 |     case ISD::UADDSAT: | 
 |       return getAllOnesConstant(DL, VT); | 
 |     } | 
 |   } | 
 |  | 
 |   // Memoize this node if possible. | 
 |   SDNode *N; | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = {N1, N2}; | 
 |   if (VT != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTs, Ops); | 
 |     void *IP = nullptr; | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { | 
 |       E->intersectFlagsWith(Flags); | 
 |       return SDValue(E, 0); | 
 |     } | 
 |  | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     N->setFlags(Flags); | 
 |     createOperands(N, Ops); | 
 |     CSEMap.InsertNode(N, IP); | 
 |   } else { | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     createOperands(N, Ops); | 
 |   } | 
 |  | 
 |   InsertNode(N); | 
 |   SDValue V = SDValue(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1, SDValue N2, SDValue N3) { | 
 |   SDNodeFlags Flags; | 
 |   if (Inserter) | 
 |     Flags = Inserter->getFlags(); | 
 |   return getNode(Opcode, DL, VT, N1, N2, N3, Flags); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1, SDValue N2, SDValue N3, | 
 |                               const SDNodeFlags Flags) { | 
 |   assert(N1.getOpcode() != ISD::DELETED_NODE && | 
 |          N2.getOpcode() != ISD::DELETED_NODE && | 
 |          N3.getOpcode() != ISD::DELETED_NODE && | 
 |          "Operand is DELETED_NODE!"); | 
 |   // Perform various simplifications. | 
 |   switch (Opcode) { | 
 |   case ISD::FMA: | 
 |   case ISD::FMAD: { | 
 |     assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); | 
 |     assert(N1.getValueType() == VT && N2.getValueType() == VT && | 
 |            N3.getValueType() == VT && "FMA types must match!"); | 
 |     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); | 
 |     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2); | 
 |     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3); | 
 |     if (N1CFP && N2CFP && N3CFP) { | 
      APFloat V1 = N1CFP->getValueAPF();
 |       const APFloat &V2 = N2CFP->getValueAPF(); | 
 |       const APFloat &V3 = N3CFP->getValueAPF(); | 
 |       if (Opcode == ISD::FMAD) { | 
 |         V1.multiply(V2, APFloat::rmNearestTiesToEven); | 
 |         V1.add(V3, APFloat::rmNearestTiesToEven); | 
 |       } else | 
 |         V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven); | 
 |       return getConstantFP(V1, DL, VT); | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::BUILD_VECTOR: { | 
 |     // Attempt to simplify BUILD_VECTOR. | 
 |     SDValue Ops[] = {N1, N2, N3}; | 
 |     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) | 
 |       return V; | 
 |     break; | 
 |   } | 
 |   case ISD::CONCAT_VECTORS: { | 
 |     SDValue Ops[] = {N1, N2, N3}; | 
 |     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) | 
 |       return V; | 
 |     break; | 
 |   } | 
 |   case ISD::SETCC: { | 
 |     assert(VT.isInteger() && "SETCC result type must be an integer!"); | 
 |     assert(N1.getValueType() == N2.getValueType() && | 
 |            "SETCC operands must have the same type!"); | 
 |     assert(VT.isVector() == N1.getValueType().isVector() && | 
 |            "SETCC type should be vector iff the operand type is vector!"); | 
 |     assert((!VT.isVector() || VT.getVectorElementCount() == | 
 |                                   N1.getValueType().getVectorElementCount()) && | 
 |            "SETCC vector element counts must match!"); | 
    // Use FoldSetCC to simplify SETCCs.
 |     if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL)) | 
 |       return V; | 
 |     // Vector constant folding. | 
 |     SDValue Ops[] = {N1, N2, N3}; | 
 |     if (SDValue V = FoldConstantArithmetic(Opcode, DL, VT, Ops)) { | 
 |       NewSDValueDbgMsg(V, "New node vector constant folding: ", this); | 
 |       return V; | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::SELECT: | 
 |   case ISD::VSELECT: | 
 |     if (SDValue V = simplifySelect(N1, N2, N3)) | 
 |       return V; | 
 |     break; | 
 |   case ISD::VECTOR_SHUFFLE: | 
 |     llvm_unreachable("should use getVectorShuffle constructor!"); | 
 |   case ISD::VECTOR_SPLICE: { | 
 |     if (cast<ConstantSDNode>(N3)->isZero()) | 
 |       return N1; | 
 |     break; | 
 |   } | 
 |   case ISD::INSERT_VECTOR_ELT: { | 
 |     ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3); | 
 |     // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except | 
 |     // for scalable vectors where we will generate appropriate code to | 
 |     // deal with out-of-bounds cases correctly. | 
 |     if (N3C && N1.getValueType().isFixedLengthVector() && | 
 |         N3C->getZExtValue() >= N1.getValueType().getVectorNumElements()) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     // Undefined index can be assumed out-of-bounds, so that's UNDEF too. | 
 |     if (N3.isUndef()) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     // If the inserted element is an UNDEF, just use the input vector. | 
 |     if (N2.isUndef()) | 
 |       return N1; | 
 |  | 
 |     break; | 
 |   } | 
 |   case ISD::INSERT_SUBVECTOR: { | 
 |     // Inserting undef into undef is still undef. | 
 |     if (N1.isUndef() && N2.isUndef()) | 
 |       return getUNDEF(VT); | 
 |  | 
 |     EVT N2VT = N2.getValueType(); | 
 |     assert(VT == N1.getValueType() && | 
 |            "Dest and insert subvector source types must match!"); | 
 |     assert(VT.isVector() && N2VT.isVector() && | 
 |            "Insert subvector VTs must be vectors!"); | 
 |     assert(VT.getVectorElementType() == N2VT.getVectorElementType() && | 
 |            "Insert subvector VTs must have the same element type!"); | 
 |     assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) && | 
 |            "Cannot insert a scalable vector into a fixed length vector!"); | 
 |     assert((VT.isScalableVector() != N2VT.isScalableVector() || | 
 |             VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) && | 
 |            "Insert subvector must be from smaller vector to larger vector!"); | 
 |     assert(isa<ConstantSDNode>(N3) && | 
 |            "Insert subvector index must be constant"); | 
 |     assert((VT.isScalableVector() != N2VT.isScalableVector() || | 
 |             (N2VT.getVectorMinNumElements() + | 
 |              cast<ConstantSDNode>(N3)->getZExtValue()) <= | 
 |                 VT.getVectorMinNumElements()) && | 
 |            "Insert subvector overflow!"); | 
 |     assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() == | 
 |                TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() && | 
 |            "Constant index for INSERT_SUBVECTOR has an invalid size"); | 
 |  | 
 |     // Trivial insertion. | 
 |     if (VT == N2VT) | 
 |       return N2; | 
 |  | 
 |     // If this is an insert of an extracted vector into an undef vector, we | 
 |     // can just use the input to the extract. | 
 |     if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR && | 
 |         N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT) | 
 |       return N2.getOperand(0); | 
 |     break; | 
 |   } | 
 |   case ISD::BITCAST: | 
 |     // Fold bit_convert nodes from a type to themselves. | 
 |     if (N1.getValueType() == VT) | 
 |       return N1; | 
 |     break; | 
 |   case ISD::VP_TRUNCATE: | 
 |   case ISD::VP_SIGN_EXTEND: | 
 |   case ISD::VP_ZERO_EXTEND: | 
 |     // Don't create noop casts. | 
 |     if (N1.getValueType() == VT) | 
 |       return N1; | 
 |     break; | 
 |   } | 
 |  | 
 |   // Memoize node if it doesn't produce a glue result. | 
 |   SDNode *N; | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = {N1, N2, N3}; | 
 |   if (VT != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTs, Ops); | 
 |     void *IP = nullptr; | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { | 
 |       E->intersectFlagsWith(Flags); | 
 |       return SDValue(E, 0); | 
 |     } | 
 |  | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     N->setFlags(Flags); | 
 |     createOperands(N, Ops); | 
 |     CSEMap.InsertNode(N, IP); | 
 |   } else { | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     createOperands(N, Ops); | 
 |   } | 
 |  | 
 |   InsertNode(N); | 
 |   SDValue V = SDValue(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) { | 
 |   SDValue Ops[] = { N1, N2, N3, N4 }; | 
 |   return getNode(Opcode, DL, VT, Ops); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               SDValue N1, SDValue N2, SDValue N3, SDValue N4, | 
 |                               SDValue N5) { | 
 |   SDValue Ops[] = { N1, N2, N3, N4, N5 }; | 
 |   return getNode(Opcode, DL, VT, Ops); | 
 | } | 
 |  | 
 | /// getStackArgumentTokenFactor - Compute a TokenFactor to force all | 
 | /// the incoming stack arguments to be loaded from the stack. | 
 | SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { | 
 |   SmallVector<SDValue, 8> ArgChains; | 
 |  | 
 |   // Include the original chain at the beginning of the list. When this is | 
 |   // used by target LowerCall hooks, this helps legalize find the | 
 |   // CALLSEQ_BEGIN node. | 
 |   ArgChains.push_back(Chain); | 
 |  | 
 |   // Add a chain value for each stack argument. | 
 |   for (SDNode *U : getEntryNode().getNode()->uses()) | 
 |     if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) | 
 |       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) | 
 |         if (FI->getIndex() < 0) | 
 |           ArgChains.push_back(SDValue(L, 1)); | 
 |  | 
 |   // Build a tokenfactor for all the chains. | 
 |   return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains); | 
 | } | 
 |  | 
 | /// getMemsetValue - Vectorized representation of the memset value | 
 | /// operand. | 
 | static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, | 
 |                               const SDLoc &dl) { | 
 |   assert(!Value.isUndef()); | 
 |  | 
 |   unsigned NumBits = VT.getScalarSizeInBits(); | 
 |   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { | 
 |     assert(C->getAPIntValue().getBitWidth() == 8); | 
 |     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); | 
 |     if (VT.isInteger()) { | 
 |       bool IsOpaque = VT.getSizeInBits() > 64 || | 
 |           !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue()); | 
 |       return DAG.getConstant(Val, dl, VT, false, IsOpaque); | 
 |     } | 
 |     return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl, | 
 |                              VT); | 
 |   } | 
 |  | 
 |   assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?"); | 
 |   EVT IntVT = VT.getScalarType(); | 
 |   if (!IntVT.isInteger()) | 
 |     IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits()); | 
 |  | 
 |   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value); | 
 |   if (NumBits > 8) { | 
 |     // Use a multiplication with 0x010101... to extend the input to the | 
 |     // required length. | 
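    // e.g. extending an i8 fill value 0xAB to i32: 0xAB * 0x01010101 =
    // 0xABABABAB.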
 |     APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); | 
 |     Value = DAG.getNode(ISD::MUL, dl, IntVT, Value, | 
 |                         DAG.getConstant(Magic, dl, IntVT)); | 
 |   } | 
 |  | 
 |   if (VT != Value.getValueType() && !VT.isInteger()) | 
 |     Value = DAG.getBitcast(VT.getScalarType(), Value); | 
 |   if (VT != Value.getValueType()) | 
 |     Value = DAG.getSplatBuildVector(VT, dl, Value); | 
 |  | 
 |   return Value; | 
 | } | 
 |  | 
 | /// getMemsetStringVal - Similar to getMemsetValue. Except this is only | 
 | /// used when a memcpy is turned into a memset when the source is a constant | 
 | /// string ptr. | 
 | static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG, | 
 |                                   const TargetLowering &TLI, | 
 |                                   const ConstantDataArraySlice &Slice) { | 
 |   // Handle vector with all elements zero. | 
 |   if (Slice.Array == nullptr) { | 
 |     if (VT.isInteger()) | 
 |       return DAG.getConstant(0, dl, VT); | 
 |     if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128) | 
 |       return DAG.getConstantFP(0.0, dl, VT); | 
 |     if (VT.isVector()) { | 
 |       unsigned NumElts = VT.getVectorNumElements(); | 
 |       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; | 
 |       return DAG.getNode(ISD::BITCAST, dl, VT, | 
 |                          DAG.getConstant(0, dl, | 
 |                                          EVT::getVectorVT(*DAG.getContext(), | 
 |                                                           EltVT, NumElts))); | 
 |     } | 
 |     llvm_unreachable("Expected type!"); | 
 |   } | 
 |  | 
 |   assert(!VT.isVector() && "Can't handle vector type here!"); | 
 |   unsigned NumVTBits = VT.getSizeInBits(); | 
 |   unsigned NumVTBytes = NumVTBits / 8; | 
 |   unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length)); | 
 |  | 
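  // Pack the source bytes into Val honoring the target's endianness; e.g. the
  // 4-byte slice "abcd" packs to 0x64636261 on a little-endian target.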
 |   APInt Val(NumVTBits, 0); | 
 |   if (DAG.getDataLayout().isLittleEndian()) { | 
 |     for (unsigned i = 0; i != NumBytes; ++i) | 
 |       Val |= (uint64_t)(unsigned char)Slice[i] << i*8; | 
 |   } else { | 
 |     for (unsigned i = 0; i != NumBytes; ++i) | 
 |       Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8; | 
 |   } | 
 |  | 
 |   // If the "cost" of materializing the integer immediate is less than the cost | 
 |   // of a load, then it is cost effective to turn the load into the immediate. | 
 |   Type *Ty = VT.getTypeForEVT(*DAG.getContext()); | 
 |   if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty)) | 
 |     return DAG.getConstant(Val, dl, VT); | 
 |   return SDValue(); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset, | 
 |                                            const SDLoc &DL, | 
 |                                            const SDNodeFlags Flags) { | 
 |   EVT VT = Base.getValueType(); | 
 |   SDValue Index; | 
 |  | 
 |   if (Offset.isScalable()) | 
 |     Index = getVScale(DL, Base.getValueType(), | 
 |                       APInt(Base.getValueSizeInBits().getFixedValue(), | 
 |                             Offset.getKnownMinValue())); | 
 |   else | 
 |     Index = getConstant(Offset.getFixedValue(), DL, VT); | 
 |  | 
 |   return getMemBasePlusOffset(Base, Index, DL, Flags); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset, | 
 |                                            const SDLoc &DL, | 
 |                                            const SDNodeFlags Flags) { | 
 |   assert(Offset.getValueType().isInteger()); | 
 |   EVT BasePtrVT = Ptr.getValueType(); | 
 |   return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags); | 
 | } | 
 |  | 
 | /// Returns true if memcpy source is constant data. | 
 | static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) { | 
 |   uint64_t SrcDelta = 0; | 
 |   GlobalAddressSDNode *G = nullptr; | 
 |   if (Src.getOpcode() == ISD::GlobalAddress) | 
 |     G = cast<GlobalAddressSDNode>(Src); | 
 |   else if (Src.getOpcode() == ISD::ADD && | 
 |            Src.getOperand(0).getOpcode() == ISD::GlobalAddress && | 
 |            Src.getOperand(1).getOpcode() == ISD::Constant) { | 
 |     G = cast<GlobalAddressSDNode>(Src.getOperand(0)); | 
 |     SrcDelta = Src.getConstantOperandVal(1); | 
 |   } | 
 |   if (!G) | 
 |     return false; | 
 |  | 
 |   return getConstantDataArrayInfo(G->getGlobal(), Slice, 8, | 
 |                                   SrcDelta + G->getOffset()); | 
 | } | 
 |  | 
 | static bool shouldLowerMemFuncForSize(const MachineFunction &MF, | 
 |                                       SelectionDAG &DAG) { | 
 |   // On Darwin, -Os means optimize for size without hurting performance, so | 
 |   // only really optimize for size when -Oz (MinSize) is used. | 
 |   if (MF.getTarget().getTargetTriple().isOSDarwin()) | 
 |     return MF.getFunction().hasMinSize(); | 
 |   return DAG.shouldOptForSize(); | 
 | } | 
 |  | 
static void chainLoadsAndStoresForMemcpy(
    SelectionDAG &DAG, const SDLoc &dl, SmallVector<SDValue, 32> &OutChains,
    unsigned From, unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
    SmallVector<SDValue, 16> &OutStoreChains) {
 |   assert(OutLoadChains.size() && "Missing loads in memcpy inlining"); | 
 |   assert(OutStoreChains.size() && "Missing stores in memcpy inlining"); | 
 |   SmallVector<SDValue, 16> GluedLoadChains; | 
 |   for (unsigned i = From; i < To; ++i) { | 
 |     OutChains.push_back(OutLoadChains[i]); | 
 |     GluedLoadChains.push_back(OutLoadChains[i]); | 
 |   } | 
 |  | 
 |   // Chain for all loads. | 
 |   SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, | 
 |                                   GluedLoadChains); | 
 |  | 
 |   for (unsigned i = From; i < To; ++i) { | 
    StoreSDNode *ST = cast<StoreSDNode>(OutStoreChains[i]);
 |     SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(), | 
 |                                   ST->getBasePtr(), ST->getMemoryVT(), | 
 |                                   ST->getMemOperand()); | 
 |     OutChains.push_back(NewStore); | 
 |   } | 
 | } | 
 |  | 
 | static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, | 
 |                                        SDValue Chain, SDValue Dst, SDValue Src, | 
 |                                        uint64_t Size, Align Alignment, | 
 |                                        bool isVol, bool AlwaysInline, | 
 |                                        MachinePointerInfo DstPtrInfo, | 
 |                                        MachinePointerInfo SrcPtrInfo, | 
 |                                        const AAMDNodes &AAInfo, AAResults *AA) { | 
 |   // Turn a memcpy of undef to nop. | 
  // FIXME: We need to honor volatile even if Src is undef.
 |   if (Src.isUndef()) | 
 |     return Chain; | 
 |  | 
 |   // Expand memcpy to a series of load and store ops if the size operand falls | 
 |   // below a certain threshold. | 
  // TODO: In the AlwaysInline case, if the size is large, generate a loop
  // rather than a potentially humongous number of loads and stores.
 |   const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | 
 |   const DataLayout &DL = DAG.getDataLayout(); | 
 |   LLVMContext &C = *DAG.getContext(); | 
 |   std::vector<EVT> MemOps; | 
 |   bool DstAlignCanChange = false; | 
 |   MachineFunction &MF = DAG.getMachineFunction(); | 
 |   MachineFrameInfo &MFI = MF.getFrameInfo(); | 
 |   bool OptSize = shouldLowerMemFuncForSize(MF, DAG); | 
 |   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); | 
 |   if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) | 
 |     DstAlignCanChange = true; | 
 |   MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); | 
 |   if (!SrcAlign || Alignment > *SrcAlign) | 
 |     SrcAlign = Alignment; | 
 |   assert(SrcAlign && "SrcAlign must be set"); | 
 |   ConstantDataArraySlice Slice; | 
 |   // If marked as volatile, perform a copy even when marked as constant. | 
 |   bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice); | 
 |   bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr; | 
 |   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); | 
 |   const MemOp Op = isZeroConstant | 
 |                        ? MemOp::Set(Size, DstAlignCanChange, Alignment, | 
 |                                     /*IsZeroMemset*/ true, isVol) | 
 |                        : MemOp::Copy(Size, DstAlignCanChange, Alignment, | 
 |                                      *SrcAlign, isVol, CopyFromConstant); | 
 |   if (!TLI.findOptimalMemOpLowering( | 
 |           MemOps, Limit, Op, DstPtrInfo.getAddrSpace(), | 
 |           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes())) | 
 |     return SDValue(); | 
 |  | 
 |   if (DstAlignCanChange) { | 
 |     Type *Ty = MemOps[0].getTypeForEVT(C); | 
 |     Align NewAlign = DL.getABITypeAlign(Ty); | 
 |  | 
 |     // Don't promote to an alignment that would require dynamic stack | 
 |     // realignment which may conflict with optimizations such as tail call | 
 |     // optimization. | 
 |     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); | 
 |     if (!TRI->hasStackRealignment(MF)) | 
 |       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) | 
 |         NewAlign = NewAlign.previous(); | 
 |  | 
 |     if (NewAlign > Alignment) { | 
 |       // Give the stack frame object a larger alignment if needed. | 
 |       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) | 
 |         MFI.setObjectAlignment(FI->getIndex(), NewAlign); | 
 |       Alignment = NewAlign; | 
 |     } | 
 |   } | 
 |  | 
 |   // Prepare AAInfo for loads/stores after lowering this memcpy. | 
 |   AAMDNodes NewAAInfo = AAInfo; | 
 |   NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr; | 
 |  | 
 |   const Value *SrcVal = dyn_cast_if_present<const Value *>(SrcPtrInfo.V); | 
 |   bool isConstant = | 
 |       AA && SrcVal && | 
 |       AA->pointsToConstantMemory(MemoryLocation(SrcVal, Size, AAInfo)); | 
 |  | 
 |   MachineMemOperand::Flags MMOFlags = | 
 |       isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; | 
 |   SmallVector<SDValue, 16> OutLoadChains; | 
 |   SmallVector<SDValue, 16> OutStoreChains; | 
 |   SmallVector<SDValue, 32> OutChains; | 
 |   unsigned NumMemOps = MemOps.size(); | 
 |   uint64_t SrcOff = 0, DstOff = 0; | 
 |   for (unsigned i = 0; i != NumMemOps; ++i) { | 
 |     EVT VT = MemOps[i]; | 
 |     unsigned VTSize = VT.getSizeInBits() / 8; | 
 |     SDValue Value, Store; | 
 |  | 
 |     if (VTSize > Size) { | 
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
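      // e.g. a 7-byte copy lowered as two i32 load/store pairs uses offsets
      // 0 and 3, overlapping by one byte.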
 |       assert(i == NumMemOps-1 && i != 0); | 
 |       SrcOff -= VTSize - Size; | 
 |       DstOff -= VTSize - Size; | 
 |     } | 
 |  | 
 |     if (CopyFromConstant && | 
 |         (isZeroConstant || (VT.isInteger() && !VT.isVector()))) { | 
 |       // It's unlikely a store of a vector immediate can be done in a single | 
 |       // instruction. It would require a load from a constantpool first. | 
 |       // We only handle zero vectors here. | 
 |       // FIXME: Handle other cases where store of vector immediate is done in | 
 |       // a single instruction. | 
 |       ConstantDataArraySlice SubSlice; | 
 |       if (SrcOff < Slice.Length) { | 
 |         SubSlice = Slice; | 
 |         SubSlice.move(SrcOff); | 
 |       } else { | 
 |         // This is an out-of-bounds access and hence UB. Pretend we read zero. | 
 |         SubSlice.Array = nullptr; | 
 |         SubSlice.Offset = 0; | 
 |         SubSlice.Length = VTSize; | 
 |       } | 
 |       Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice); | 
 |       if (Value.getNode()) { | 
 |         Store = DAG.getStore( | 
 |             Chain, dl, Value, | 
 |             DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl), | 
 |             DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo); | 
 |         OutChains.push_back(Store); | 
 |       } | 
 |     } | 
 |  | 
 |     if (!Store.getNode()) { | 
 |       // The type might not be legal for the target.  This should only happen | 
 |       // if the type is smaller than a legal type, as on PPC, so the right | 
 |       // thing to do is generate a LoadExt/StoreTrunc pair.  These simplify | 
 |       // to Load/Store if NVT==VT. | 
      // FIXME: does the case above also need this?
 |       EVT NVT = TLI.getTypeToTransformTo(C, VT); | 
 |       assert(NVT.bitsGE(VT)); | 
 |  | 
 |       bool isDereferenceable = | 
 |         SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); | 
 |       MachineMemOperand::Flags SrcMMOFlags = MMOFlags; | 
 |       if (isDereferenceable) | 
 |         SrcMMOFlags |= MachineMemOperand::MODereferenceable; | 
 |       if (isConstant) | 
 |         SrcMMOFlags |= MachineMemOperand::MOInvariant; | 
 |  | 
 |       Value = DAG.getExtLoad( | 
 |           ISD::EXTLOAD, dl, NVT, Chain, | 
 |           DAG.getMemBasePlusOffset(Src, TypeSize::getFixed(SrcOff), dl), | 
 |           SrcPtrInfo.getWithOffset(SrcOff), VT, | 
 |           commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags, NewAAInfo); | 
 |       OutLoadChains.push_back(Value.getValue(1)); | 
 |  | 
 |       Store = DAG.getTruncStore( | 
 |           Chain, dl, Value, | 
 |           DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl), | 
 |           DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo); | 
 |       OutStoreChains.push_back(Store); | 
 |     } | 
 |     SrcOff += VTSize; | 
 |     DstOff += VTSize; | 
 |     Size -= VTSize; | 
 |   } | 
 |  | 
  unsigned GluedLdStLimit =
      MaxLdStGlue == 0 ? TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
 |   unsigned NumLdStInMemcpy = OutStoreChains.size(); | 
 |  | 
 |   if (NumLdStInMemcpy) { | 
    // A memcpy of constants may have been converted to a memset, in which
    // case there are no loads, only stores. In the absence of loads, there
    // is nothing to gang up.
    if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
      // If the target does not care, just leave the chains as they are.
 |       for (unsigned i = 0; i < NumLdStInMemcpy; ++i) { | 
 |         OutChains.push_back(OutLoadChains[i]); | 
 |         OutChains.push_back(OutStoreChains[i]); | 
 |       } | 
 |     } else { | 
      // The number of ld/st pairs is within the limit set by the target.
      if (NumLdStInMemcpy <= GluedLdStLimit) {
        chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0, NumLdStInMemcpy,
                                     OutLoadChains, OutStoreChains);
 |       } else { | 
        unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
 |         unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit; | 
 |         unsigned GlueIter = 0; | 
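        // Gang up the glued ld/st groups starting from the tail of the
        // sequence and working backwards; the residual ops at the front of
        // the sequence are chained last.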
 |  | 
 |         for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) { | 
 |           unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit; | 
 |           unsigned IndexTo   = NumLdStInMemcpy - GlueIter; | 
 |  | 
 |           chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo, | 
 |                                        OutLoadChains, OutStoreChains); | 
 |           GlueIter += GluedLdStLimit; | 
 |         } | 
 |  | 
 |         // Residual ld/st. | 
 |         if (RemainingLdStInMemcpy) { | 
          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                       RemainingLdStInMemcpy, OutLoadChains,
                                       OutStoreChains);
 |         } | 
 |       } | 
 |     } | 
 |   } | 
 |   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); | 
 | } | 
 |  | 
 | static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl, | 
 |                                         SDValue Chain, SDValue Dst, SDValue Src, | 
 |                                         uint64_t Size, Align Alignment, | 
 |                                         bool isVol, bool AlwaysInline, | 
 |                                         MachinePointerInfo DstPtrInfo, | 
 |                                         MachinePointerInfo SrcPtrInfo, | 
 |                                         const AAMDNodes &AAInfo) { | 
 |   // Turn a memmove of undef to nop. | 
  // FIXME: We need to honor volatile even if Src is undef.
 |   if (Src.isUndef()) | 
 |     return Chain; | 
 |  | 
 |   // Expand memmove to a series of load and store ops if the size operand falls | 
 |   // below a certain threshold. | 
 |   const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | 
 |   const DataLayout &DL = DAG.getDataLayout(); | 
 |   LLVMContext &C = *DAG.getContext(); | 
 |   std::vector<EVT> MemOps; | 
 |   bool DstAlignCanChange = false; | 
 |   MachineFunction &MF = DAG.getMachineFunction(); | 
 |   MachineFrameInfo &MFI = MF.getFrameInfo(); | 
 |   bool OptSize = shouldLowerMemFuncForSize(MF, DAG); | 
 |   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); | 
 |   if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) | 
 |     DstAlignCanChange = true; | 
 |   MaybeAlign SrcAlign = DAG.InferPtrAlign(Src); | 
 |   if (!SrcAlign || Alignment > *SrcAlign) | 
 |     SrcAlign = Alignment; | 
 |   assert(SrcAlign && "SrcAlign must be set"); | 
 |   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); | 
 |   if (!TLI.findOptimalMemOpLowering( | 
 |           MemOps, Limit, | 
 |           MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign, | 
 |                       /*IsVolatile*/ true), | 
 |           DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(), | 
 |           MF.getFunction().getAttributes())) | 
 |     return SDValue(); | 
 |  | 
 |   if (DstAlignCanChange) { | 
 |     Type *Ty = MemOps[0].getTypeForEVT(C); | 
 |     Align NewAlign = DL.getABITypeAlign(Ty); | 
 |  | 
 |     // Don't promote to an alignment that would require dynamic stack | 
 |     // realignment which may conflict with optimizations such as tail call | 
 |     // optimization. | 
 |     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); | 
 |     if (!TRI->hasStackRealignment(MF)) | 
 |       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) | 
 |         NewAlign = NewAlign.previous(); | 
 |  | 
 |     if (NewAlign > Alignment) { | 
 |       // Give the stack frame object a larger alignment if needed. | 
 |       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) | 
 |         MFI.setObjectAlignment(FI->getIndex(), NewAlign); | 
 |       Alignment = NewAlign; | 
 |     } | 
 |   } | 
 |  | 
 |   // Prepare AAInfo for loads/stores after lowering this memmove. | 
 |   AAMDNodes NewAAInfo = AAInfo; | 
 |   NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr; | 
 |  | 
 |   MachineMemOperand::Flags MMOFlags = | 
 |       isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone; | 
 |   uint64_t SrcOff = 0, DstOff = 0; | 
 |   SmallVector<SDValue, 8> LoadValues; | 
 |   SmallVector<SDValue, 8> LoadChains; | 
 |   SmallVector<SDValue, 8> OutChains; | 
 |   unsigned NumMemOps = MemOps.size(); | 
 |   for (unsigned i = 0; i < NumMemOps; i++) { | 
 |     EVT VT = MemOps[i]; | 
 |     unsigned VTSize = VT.getSizeInBits() / 8; | 
 |     SDValue Value; | 
 |  | 
 |     bool isDereferenceable = | 
 |       SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL); | 
 |     MachineMemOperand::Flags SrcMMOFlags = MMOFlags; | 
 |     if (isDereferenceable) | 
 |       SrcMMOFlags |= MachineMemOperand::MODereferenceable; | 
 |  | 
 |     Value = DAG.getLoad( | 
 |         VT, dl, Chain, | 
 |         DAG.getMemBasePlusOffset(Src, TypeSize::getFixed(SrcOff), dl), | 
 |         SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo); | 
 |     LoadValues.push_back(Value); | 
 |     LoadChains.push_back(Value.getValue(1)); | 
 |     SrcOff += VTSize; | 
 |   } | 
 |   Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); | 
 |   OutChains.clear(); | 
 |   for (unsigned i = 0; i < NumMemOps; i++) { | 
 |     EVT VT = MemOps[i]; | 
 |     unsigned VTSize = VT.getSizeInBits() / 8; | 
 |     SDValue Store; | 
 |  | 
 |     Store = DAG.getStore( | 
 |         Chain, dl, LoadValues[i], | 
 |         DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl), | 
 |         DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo); | 
 |     OutChains.push_back(Store); | 
 |     DstOff += VTSize; | 
 |   } | 
 |  | 
 |   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); | 
 | } | 
 |  | 
 | /// Lower the call to 'memset' intrinsic function into a series of store | 
 | /// operations. | 
 | /// | 
 | /// \param DAG Selection DAG where lowered code is placed. | 
 | /// \param dl Link to corresponding IR location. | 
 | /// \param Chain Control flow dependency. | 
 | /// \param Dst Pointer to destination memory location. | 
 | /// \param Src Value of byte to write into the memory. | 
 | /// \param Size Number of bytes to write. | 
 | /// \param Alignment Alignment of the destination in bytes. | 
 | /// \param isVol True if destination is volatile. | 
 | /// \param AlwaysInline Makes sure no function call is generated. | 
 | /// \param DstPtrInfo IR information on the memory pointer. | 
 | /// \returns New head in the control flow, if lowering was successful, empty | 
 | /// SDValue otherwise. | 
 | /// | 
 | /// The function tries to replace 'llvm.memset' intrinsic with several store | 
 | /// operations and value calculation code. This is usually profitable for small | 
 | /// memory size or when the semantic requires inlining. | 
 | static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl, | 
 |                                SDValue Chain, SDValue Dst, SDValue Src, | 
 |                                uint64_t Size, Align Alignment, bool isVol, | 
 |                                bool AlwaysInline, MachinePointerInfo DstPtrInfo, | 
 |                                const AAMDNodes &AAInfo) { | 
 |   // Turn a memset of undef to nop. | 
  // FIXME: We need to honor volatile even if Src is undef.
 |   if (Src.isUndef()) | 
 |     return Chain; | 
 |  | 
 |   // Expand memset to a series of load/store ops if the size operand | 
 |   // falls below a certain threshold. | 
 |   const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | 
 |   std::vector<EVT> MemOps; | 
 |   bool DstAlignCanChange = false; | 
 |   MachineFunction &MF = DAG.getMachineFunction(); | 
 |   MachineFrameInfo &MFI = MF.getFrameInfo(); | 
 |   bool OptSize = shouldLowerMemFuncForSize(MF, DAG); | 
 |   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); | 
 |   if (FI && !MFI.isFixedObjectIndex(FI->getIndex())) | 
 |     DstAlignCanChange = true; | 
 |   bool IsZeroVal = isNullConstant(Src); | 
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemset(OptSize);
 |  | 
 |   if (!TLI.findOptimalMemOpLowering( | 
 |           MemOps, Limit, | 
 |           MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol), | 
 |           DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes())) | 
 |     return SDValue(); | 
 |  | 
 |   if (DstAlignCanChange) { | 
 |     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); | 
 |     const DataLayout &DL = DAG.getDataLayout(); | 
 |     Align NewAlign = DL.getABITypeAlign(Ty); | 
 |  | 
 |     // Don't promote to an alignment that would require dynamic stack | 
 |     // realignment which may conflict with optimizations such as tail call | 
 |     // optimization. | 
 |     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); | 
 |     if (!TRI->hasStackRealignment(MF)) | 
 |       while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign)) | 
 |         NewAlign = NewAlign.previous(); | 
 |  | 
 |     if (NewAlign > Alignment) { | 
 |       // Give the stack frame object a larger alignment if needed. | 
 |       if (MFI.getObjectAlign(FI->getIndex()) < NewAlign) | 
 |         MFI.setObjectAlignment(FI->getIndex(), NewAlign); | 
 |       Alignment = NewAlign; | 
 |     } | 
 |   } | 
 |  | 
 |   SmallVector<SDValue, 8> OutChains; | 
 |   uint64_t DstOff = 0; | 
 |   unsigned NumMemOps = MemOps.size(); | 
 |  | 
 |   // Find the largest store and generate the bit pattern for it. | 
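  // e.g. for MemOps = {i32, i16, i8}, the splat pattern is materialized once
  // as an i32 and the narrower values are derived from it (or recomputed)
  // below.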
 |   EVT LargestVT = MemOps[0]; | 
 |   for (unsigned i = 1; i < NumMemOps; i++) | 
 |     if (MemOps[i].bitsGT(LargestVT)) | 
 |       LargestVT = MemOps[i]; | 
 |   SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); | 
 |  | 
 |   // Prepare AAInfo for loads/stores after lowering this memset. | 
 |   AAMDNodes NewAAInfo = AAInfo; | 
 |   NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr; | 
 |  | 
 |   for (unsigned i = 0; i < NumMemOps; i++) { | 
 |     EVT VT = MemOps[i]; | 
 |     unsigned VTSize = VT.getSizeInBits() / 8; | 
 |     if (VTSize > Size) { | 
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
 |       assert(i == NumMemOps-1 && i != 0); | 
 |       DstOff -= VTSize - Size; | 
 |     } | 
 |  | 
    // If this store is smaller than the largest store, see whether we can get
 |     // the smaller value for free with a truncate or extract vector element and | 
 |     // then store. | 
 |     SDValue Value = MemSetValue; | 
 |     if (VT.bitsLT(LargestVT)) { | 
 |       unsigned Index; | 
 |       unsigned NElts = LargestVT.getSizeInBits() / VT.getSizeInBits(); | 
 |       EVT SVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), NElts); | 
 |       if (!LargestVT.isVector() && !VT.isVector() && | 
 |           TLI.isTruncateFree(LargestVT, VT)) | 
 |         Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); | 
 |       else if (LargestVT.isVector() && !VT.isVector() && | 
 |                TLI.shallExtractConstSplatVectorElementToStore( | 
 |                    LargestVT.getTypeForEVT(*DAG.getContext()), | 
 |                    VT.getSizeInBits(), Index) && | 
 |                TLI.isTypeLegal(SVT) && | 
 |                LargestVT.getSizeInBits() == SVT.getSizeInBits()) { | 
        // Targets that can combine store(extractelement VectorTy, Idx) can get
 |         // the smaller value for free. | 
 |         SDValue TailValue = DAG.getNode(ISD::BITCAST, dl, SVT, MemSetValue); | 
 |         Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, TailValue, | 
 |                             DAG.getVectorIdxConstant(Index, dl)); | 
 |       } else | 
 |         Value = getMemsetValue(Src, VT, DAG, dl); | 
 |     } | 
 |     assert(Value.getValueType() == VT && "Value with wrong type."); | 
 |     SDValue Store = DAG.getStore( | 
 |         Chain, dl, Value, | 
 |         DAG.getMemBasePlusOffset(Dst, TypeSize::getFixed(DstOff), dl), | 
 |         DstPtrInfo.getWithOffset(DstOff), Alignment, | 
 |         isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone, | 
 |         NewAAInfo); | 
 |     OutChains.push_back(Store); | 
 |     DstOff += VT.getSizeInBits() / 8; | 
 |     Size -= VTSize; | 
 |   } | 
 |  | 
 |   return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); | 
 | } | 
 |  | 
 | static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI, | 
 |                                             unsigned AS) { | 
  // Lowering memcpy / memset / memmove intrinsics to calls is only valid if
  // all pointer operands can be losslessly bitcasted to pointers of address
  // space 0.
 |   if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) { | 
 |     report_fatal_error("cannot lower memory intrinsic in address space " + | 
 |                        Twine(AS)); | 
 |   } | 
 | } | 
 |  | 
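/// getMemcpy - Lower a memcpy of Size bytes from Src to Dst. Strategies are
/// tried in order: inline loads/stores when the size is a suitable constant,
/// target-specific lowering via EmitTargetCodeForMemcpy, a forced inline
/// expansion when AlwaysInline is set, and finally a call to the memcpy
/// library function.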
 | SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, | 
 |                                 SDValue Src, SDValue Size, Align Alignment, | 
 |                                 bool isVol, bool AlwaysInline, bool isTailCall, | 
 |                                 MachinePointerInfo DstPtrInfo, | 
 |                                 MachinePointerInfo SrcPtrInfo, | 
 |                                 const AAMDNodes &AAInfo, AAResults *AA) { | 
 |   // Check to see if we should lower the memcpy to loads and stores first. | 
 |   // For cases within the target-specified limits, this is the best choice. | 
 |   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); | 
 |   if (ConstantSize) { | 
 |     // Memcpy with size zero? Just return the original chain. | 
 |     if (ConstantSize->isZero()) | 
 |       return Chain; | 
 |  | 
 |     SDValue Result = getMemcpyLoadsAndStores( | 
 |         *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, | 
 |         isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo, AA); | 
 |     if (Result.getNode()) | 
 |       return Result; | 
 |   } | 
 |  | 
 |   // Then check to see if we should lower the memcpy with target-specific | 
 |   // code. If the target chooses to do this, this is the next best. | 
 |   if (TSI) { | 
 |     SDValue Result = TSI->EmitTargetCodeForMemcpy( | 
 |         *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline, | 
 |         DstPtrInfo, SrcPtrInfo); | 
 |     if (Result.getNode()) | 
 |       return Result; | 
 |   } | 
 |  | 
 |   // If we really need inline code and the target declined to provide it, | 
 |   // use a (potentially long) sequence of loads and stores. | 
 |   if (AlwaysInline) { | 
 |     assert(ConstantSize && "AlwaysInline requires a constant size!"); | 
 |     return getMemcpyLoadsAndStores( | 
 |         *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, | 
 |         isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo, AA); | 
 |   } | 
 |  | 
 |   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); | 
 |   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); | 
 |  | 
 |   // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc | 
 |   // memcpy is not guaranteed to be safe. libc memcpys aren't required to | 
 |   // respect volatile, so they may do things like read or write memory | 
 |   // beyond the given memory regions. But fixing this isn't easy, and most | 
 |   // people don't care. | 
 |  | 
 |   // Emit a library call. | 
 |   TargetLowering::ArgListTy Args; | 
 |   TargetLowering::ArgListEntry Entry; | 
 |   Entry.Ty = PointerType::getUnqual(*getContext()); | 
 |   Entry.Node = Dst; Args.push_back(Entry); | 
 |   Entry.Node = Src; Args.push_back(Entry); | 
 |  | 
 |   Entry.Ty = getDataLayout().getIntPtrType(*getContext()); | 
 |   Entry.Node = Size; Args.push_back(Entry); | 
 |   // FIXME: pass in SDLoc | 
 |   TargetLowering::CallLoweringInfo CLI(*this); | 
 |   CLI.setDebugLoc(dl) | 
 |       .setChain(Chain) | 
 |       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY), | 
 |                     Dst.getValueType().getTypeForEVT(*getContext()), | 
 |                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), | 
 |                                       TLI->getPointerTy(getDataLayout())), | 
 |                     std::move(Args)) | 
 |       .setDiscardResult() | 
 |       .setTailCall(isTailCall); | 
 |  | 
 |   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); | 
 |   return CallResult.second; | 
 | } | 
 |  | 
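/// getAtomicMemcpy - Lower an element-wise unordered-atomic memcpy to the
/// __llvm_memcpy_element_unordered_atomic_* library call matching the given
/// element size; an unsupported element size is a fatal error.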
 | SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl, | 
 |                                       SDValue Dst, SDValue Src, SDValue Size, | 
 |                                       Type *SizeTy, unsigned ElemSz, | 
 |                                       bool isTailCall, | 
 |                                       MachinePointerInfo DstPtrInfo, | 
 |                                       MachinePointerInfo SrcPtrInfo) { | 
 |   // Emit a library call. | 
 |   TargetLowering::ArgListTy Args; | 
 |   TargetLowering::ArgListEntry Entry; | 
 |   Entry.Ty = getDataLayout().getIntPtrType(*getContext()); | 
 |   Entry.Node = Dst; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   Entry.Node = Src; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   Entry.Ty = SizeTy; | 
 |   Entry.Node = Size; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   RTLIB::Libcall LibraryCall = | 
 |       RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz); | 
 |   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) | 
 |     report_fatal_error("Unsupported element size"); | 
 |  | 
 |   TargetLowering::CallLoweringInfo CLI(*this); | 
 |   CLI.setDebugLoc(dl) | 
 |       .setChain(Chain) | 
 |       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), | 
 |                     Type::getVoidTy(*getContext()), | 
 |                     getExternalSymbol(TLI->getLibcallName(LibraryCall), | 
 |                                       TLI->getPointerTy(getDataLayout())), | 
 |                     std::move(Args)) | 
 |       .setDiscardResult() | 
 |       .setTailCall(isTailCall); | 
 |  | 
 |   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); | 
 |   return CallResult.second; | 
 | } | 
 |  | 
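/// getMemmove - Lower a memmove from Src to Dst. Unlike memcpy there is no
/// AlwaysInline path: inline loads/stores and target-specific lowering are
/// tried first, then a call to the memmove library function is emitted.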
 | SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, | 
 |                                  SDValue Src, SDValue Size, Align Alignment, | 
 |                                  bool isVol, bool isTailCall, | 
 |                                  MachinePointerInfo DstPtrInfo, | 
 |                                  MachinePointerInfo SrcPtrInfo, | 
 |                                  const AAMDNodes &AAInfo, AAResults *AA) { | 
 |   // Check to see if we should lower the memmove to loads and stores first. | 
 |   // For cases within the target-specified limits, this is the best choice. | 
 |   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); | 
 |   if (ConstantSize) { | 
 |     // Memmove with size zero? Just return the original chain. | 
 |     if (ConstantSize->isZero()) | 
 |       return Chain; | 
 |  | 
 |     SDValue Result = getMemmoveLoadsAndStores( | 
 |         *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment, | 
 |         isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo); | 
 |     if (Result.getNode()) | 
 |       return Result; | 
 |   } | 
 |  | 
 |   // Then check to see if we should lower the memmove with target-specific | 
 |   // code. If the target chooses to do this, this is the next best. | 
 |   if (TSI) { | 
 |     SDValue Result = | 
 |         TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, | 
 |                                       Alignment, isVol, DstPtrInfo, SrcPtrInfo); | 
 |     if (Result.getNode()) | 
 |       return Result; | 
 |   } | 
 |  | 
 |   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); | 
 |   checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace()); | 
 |  | 
 |   // FIXME: If the memmove is volatile, lowering it to plain libc memmove may | 
 |   // not be safe.  See memcpy above for more details. | 
 |  | 
 |   // Emit a library call. | 
 |   TargetLowering::ArgListTy Args; | 
 |   TargetLowering::ArgListEntry Entry; | 
 |   Entry.Ty = PointerType::getUnqual(*getContext()); | 
 |   Entry.Node = Dst; Args.push_back(Entry); | 
 |   Entry.Node = Src; Args.push_back(Entry); | 
 |  | 
 |   Entry.Ty = getDataLayout().getIntPtrType(*getContext()); | 
 |   Entry.Node = Size; Args.push_back(Entry); | 
  // FIXME: pass in SDLoc
 |   TargetLowering::CallLoweringInfo CLI(*this); | 
 |   CLI.setDebugLoc(dl) | 
 |       .setChain(Chain) | 
 |       .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE), | 
 |                     Dst.getValueType().getTypeForEVT(*getContext()), | 
 |                     getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), | 
 |                                       TLI->getPointerTy(getDataLayout())), | 
 |                     std::move(Args)) | 
 |       .setDiscardResult() | 
 |       .setTailCall(isTailCall); | 
 |  | 
 |   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); | 
 |   return CallResult.second; | 
 | } | 
 |  | 
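/// getAtomicMemmove - Lower an element-wise unordered-atomic memmove to the
/// __llvm_memmove_element_unordered_atomic_* library call matching the given
/// element size.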
 | SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl, | 
 |                                        SDValue Dst, SDValue Src, SDValue Size, | 
 |                                        Type *SizeTy, unsigned ElemSz, | 
 |                                        bool isTailCall, | 
 |                                        MachinePointerInfo DstPtrInfo, | 
 |                                        MachinePointerInfo SrcPtrInfo) { | 
 |   // Emit a library call. | 
 |   TargetLowering::ArgListTy Args; | 
 |   TargetLowering::ArgListEntry Entry; | 
 |   Entry.Ty = getDataLayout().getIntPtrType(*getContext()); | 
 |   Entry.Node = Dst; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   Entry.Node = Src; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   Entry.Ty = SizeTy; | 
 |   Entry.Node = Size; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   RTLIB::Libcall LibraryCall = | 
 |       RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz); | 
 |   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) | 
 |     report_fatal_error("Unsupported element size"); | 
 |  | 
 |   TargetLowering::CallLoweringInfo CLI(*this); | 
 |   CLI.setDebugLoc(dl) | 
 |       .setChain(Chain) | 
 |       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), | 
 |                     Type::getVoidTy(*getContext()), | 
 |                     getExternalSymbol(TLI->getLibcallName(LibraryCall), | 
 |                                       TLI->getPointerTy(getDataLayout())), | 
 |                     std::move(Args)) | 
 |       .setDiscardResult() | 
 |       .setTailCall(isTailCall); | 
 |  | 
 |   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); | 
 |   return CallResult.second; | 
 | } | 
 |  | 
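/// getMemset - Lower a memset of Dst with byte value Src. Inline stores,
/// target-specific lowering, and a forced inline expansion (AlwaysInline)
/// are tried in that order; otherwise a library call is emitted, preferring
/// bzero over memset when storing zero and the target provides it.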
 | SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, | 
 |                                 SDValue Src, SDValue Size, Align Alignment, | 
 |                                 bool isVol, bool AlwaysInline, bool isTailCall, | 
 |                                 MachinePointerInfo DstPtrInfo, | 
 |                                 const AAMDNodes &AAInfo) { | 
 |   // Check to see if we should lower the memset to stores first. | 
 |   // For cases within the target-specified limits, this is the best choice. | 
 |   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); | 
 |   if (ConstantSize) { | 
 |     // Memset with size zero? Just return the original chain. | 
 |     if (ConstantSize->isZero()) | 
 |       return Chain; | 
 |  | 
 |     SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, | 
 |                                      ConstantSize->getZExtValue(), Alignment, | 
 |                                      isVol, false, DstPtrInfo, AAInfo); | 
 |  | 
 |     if (Result.getNode()) | 
 |       return Result; | 
 |   } | 
 |  | 
 |   // Then check to see if we should lower the memset with target-specific | 
 |   // code. If the target chooses to do this, this is the next best. | 
 |   if (TSI) { | 
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
        DstPtrInfo);
 |     if (Result.getNode()) | 
 |       return Result; | 
 |   } | 
 |  | 
  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of stores.
 |   if (AlwaysInline) { | 
 |     assert(ConstantSize && "AlwaysInline requires a constant size!"); | 
 |     SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src, | 
 |                                      ConstantSize->getZExtValue(), Alignment, | 
 |                                      isVol, true, DstPtrInfo, AAInfo); | 
 |     assert(Result && | 
 |            "getMemsetStores must return a valid sequence when AlwaysInline"); | 
 |     return Result; | 
 |   } | 
 |  | 
 |   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace()); | 
 |  | 
 |   // Emit a library call. | 
 |   auto &Ctx = *getContext(); | 
  const auto &DL = getDataLayout();
 |  | 
 |   TargetLowering::CallLoweringInfo CLI(*this); | 
 |   // FIXME: pass in SDLoc | 
 |   CLI.setDebugLoc(dl).setChain(Chain); | 
 |  | 
 |   const char *BzeroName = getTargetLoweringInfo().getLibcallName(RTLIB::BZERO); | 
 |  | 
 |   // Helper function to create an Entry from Node and Type. | 
 |   const auto CreateEntry = [](SDValue Node, Type *Ty) { | 
 |     TargetLowering::ArgListEntry Entry; | 
 |     Entry.Node = Node; | 
 |     Entry.Ty = Ty; | 
 |     return Entry; | 
 |   }; | 
 |  | 
 |   // If zeroing out and bzero is present, use it. | 
 |   if (isNullConstant(Src) && BzeroName) { | 
 |     TargetLowering::ArgListTy Args; | 
 |     Args.push_back(CreateEntry(Dst, PointerType::getUnqual(Ctx))); | 
 |     Args.push_back(CreateEntry(Size, DL.getIntPtrType(Ctx))); | 
 |     CLI.setLibCallee( | 
 |         TLI->getLibcallCallingConv(RTLIB::BZERO), Type::getVoidTy(Ctx), | 
 |         getExternalSymbol(BzeroName, TLI->getPointerTy(DL)), std::move(Args)); | 
 |   } else { | 
 |     TargetLowering::ArgListTy Args; | 
 |     Args.push_back(CreateEntry(Dst, PointerType::getUnqual(Ctx))); | 
 |     Args.push_back(CreateEntry(Src, Src.getValueType().getTypeForEVT(Ctx))); | 
 |     Args.push_back(CreateEntry(Size, DL.getIntPtrType(Ctx))); | 
 |     CLI.setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET), | 
 |                      Dst.getValueType().getTypeForEVT(Ctx), | 
 |                      getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET), | 
 |                                        TLI->getPointerTy(DL)), | 
 |                      std::move(Args)); | 
 |   } | 
 |  | 
 |   CLI.setDiscardResult().setTailCall(isTailCall); | 
 |  | 
 |   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); | 
 |   return CallResult.second; | 
 | } | 
 |  | 
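/// getAtomicMemset - Lower an element-wise unordered-atomic memset to the
/// __llvm_memset_element_unordered_atomic_* library call matching the given
/// element size.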
 | SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl, | 
 |                                       SDValue Dst, SDValue Value, SDValue Size, | 
 |                                       Type *SizeTy, unsigned ElemSz, | 
 |                                       bool isTailCall, | 
 |                                       MachinePointerInfo DstPtrInfo) { | 
 |   // Emit a library call. | 
 |   TargetLowering::ArgListTy Args; | 
 |   TargetLowering::ArgListEntry Entry; | 
 |   Entry.Ty = getDataLayout().getIntPtrType(*getContext()); | 
 |   Entry.Node = Dst; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   Entry.Ty = Type::getInt8Ty(*getContext()); | 
 |   Entry.Node = Value; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   Entry.Ty = SizeTy; | 
 |   Entry.Node = Size; | 
 |   Args.push_back(Entry); | 
 |  | 
 |   RTLIB::Libcall LibraryCall = | 
 |       RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz); | 
 |   if (LibraryCall == RTLIB::UNKNOWN_LIBCALL) | 
 |     report_fatal_error("Unsupported element size"); | 
 |  | 
 |   TargetLowering::CallLoweringInfo CLI(*this); | 
 |   CLI.setDebugLoc(dl) | 
 |       .setChain(Chain) | 
 |       .setLibCallee(TLI->getLibcallCallingConv(LibraryCall), | 
 |                     Type::getVoidTy(*getContext()), | 
 |                     getExternalSymbol(TLI->getLibcallName(LibraryCall), | 
 |                                       TLI->getPointerTy(getDataLayout())), | 
 |                     std::move(Args)) | 
 |       .setDiscardResult() | 
 |       .setTailCall(isTailCall); | 
 |  | 
 |   std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI); | 
 |   return CallResult.second; | 
 | } | 
 |  | 
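/// getAtomic - Get an atomic operation node. The node is uniqued in the CSE
/// map keyed on the opcode, operands, memory VT, address space and MMO
/// flags, so an equivalent existing node is reused (with its alignment
/// refined from MMO) instead of creating a duplicate.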
 | SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, | 
 |                                 SDVTList VTList, ArrayRef<SDValue> Ops, | 
 |                                 MachineMemOperand *MMO) { | 
 |   FoldingSetNodeID ID; | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   AddNodeIDNode(ID, Opcode, VTList, Ops); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
  void *IP = nullptr;
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<AtomicSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |  | 
 |   auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), | 
 |                                     VTList, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   return SDValue(N, 0); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, | 
 |                                        EVT MemVT, SDVTList VTs, SDValue Chain, | 
 |                                        SDValue Ptr, SDValue Cmp, SDValue Swp, | 
 |                                        MachineMemOperand *MMO) { | 
 |   assert(Opcode == ISD::ATOMIC_CMP_SWAP || | 
 |          Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS); | 
 |   assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); | 
 |  | 
 |   SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; | 
 |   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, | 
 |                                 SDValue Chain, SDValue Ptr, SDValue Val, | 
 |                                 MachineMemOperand *MMO) { | 
 |   assert((Opcode == ISD::ATOMIC_LOAD_ADD || | 
 |           Opcode == ISD::ATOMIC_LOAD_SUB || | 
 |           Opcode == ISD::ATOMIC_LOAD_AND || | 
 |           Opcode == ISD::ATOMIC_LOAD_CLR || | 
 |           Opcode == ISD::ATOMIC_LOAD_OR || | 
 |           Opcode == ISD::ATOMIC_LOAD_XOR || | 
 |           Opcode == ISD::ATOMIC_LOAD_NAND || | 
 |           Opcode == ISD::ATOMIC_LOAD_MIN || | 
 |           Opcode == ISD::ATOMIC_LOAD_MAX || | 
 |           Opcode == ISD::ATOMIC_LOAD_UMIN || | 
 |           Opcode == ISD::ATOMIC_LOAD_UMAX || | 
 |           Opcode == ISD::ATOMIC_LOAD_FADD || | 
 |           Opcode == ISD::ATOMIC_LOAD_FSUB || | 
 |           Opcode == ISD::ATOMIC_LOAD_FMAX || | 
 |           Opcode == ISD::ATOMIC_LOAD_FMIN || | 
 |           Opcode == ISD::ATOMIC_LOAD_UINC_WRAP || | 
 |           Opcode == ISD::ATOMIC_LOAD_UDEC_WRAP || | 
 |           Opcode == ISD::ATOMIC_SWAP || | 
 |           Opcode == ISD::ATOMIC_STORE) && | 
 |          "Invalid Atomic Op"); | 
 |  | 
 |   EVT VT = Val.getValueType(); | 
 |  | 
 |   SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : | 
 |                                                getVTList(VT, MVT::Other); | 
 |   SDValue Ops[] = {Chain, Ptr, Val}; | 
 |   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, | 
 |                                 EVT VT, SDValue Chain, SDValue Ptr, | 
 |                                 MachineMemOperand *MMO) { | 
 |   assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); | 
 |  | 
 |   SDVTList VTs = getVTList(VT, MVT::Other); | 
 |   SDValue Ops[] = {Chain, Ptr}; | 
 |   return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO); | 
 | } | 
 |  | 
 | /// getMergeValues - Create a MERGE_VALUES node from the given operands. | 
 | SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) { | 
 |   if (Ops.size() == 1) | 
 |     return Ops[0]; | 
 |  | 
 |   SmallVector<EVT, 4> VTs; | 
 |   VTs.reserve(Ops.size()); | 
 |   for (const SDValue &Op : Ops) | 
 |     VTs.push_back(Op.getValueType()); | 
 |   return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops); | 
 | } | 
 |  | 
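/// getMemIntrinsicNode - Create a MemIntrinsicSDNode for an intrinsic or
/// target memory opcode, building a MachineMemOperand from the supplied
/// pointer info, flags, size and alignment. A scalable memory VT with no
/// explicit size is recorded as an unknown-size access.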
 | SDValue SelectionDAG::getMemIntrinsicNode( | 
 |     unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops, | 
 |     EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, | 
 |     MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) { | 
 |   if (!Size && MemVT.isScalableVector()) | 
 |     Size = MemoryLocation::UnknownSize; | 
 |   else if (!Size) | 
 |     Size = MemVT.getStoreSize(); | 
 |  | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   MachineMemOperand *MMO = | 
 |       MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo); | 
 |  | 
 |   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, | 
 |                                           SDVTList VTList, | 
 |                                           ArrayRef<SDValue> Ops, EVT MemVT, | 
 |                                           MachineMemOperand *MMO) { | 
 |   assert((Opcode == ISD::INTRINSIC_VOID || | 
 |           Opcode == ISD::INTRINSIC_W_CHAIN || | 
 |           Opcode == ISD::PREFETCH || | 
 |           (Opcode <= (unsigned)std::numeric_limits<int>::max() && | 
 |            (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && | 
 |          "Opcode is not a memory-accessing opcode!"); | 
 |  | 
 |   // Memoize the node unless it returns a glue result. | 
 |   MemIntrinsicSDNode *N; | 
 |   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTList, Ops); | 
 |     ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>( | 
 |         Opcode, dl.getIROrder(), VTList, MemVT, MMO)); | 
 |     ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |     ID.AddInteger(MMO->getFlags()); | 
 |     ID.AddInteger(MemVT.getRawBits()); | 
 |     void *IP = nullptr; | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |       cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); | 
 |       return SDValue(E, 0); | 
 |     } | 
 |  | 
 |     N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), | 
 |                                       VTList, MemVT, MMO); | 
 |     createOperands(N, Ops); | 
 |  | 
    CSEMap.InsertNode(N, IP);
 |   } else { | 
 |     N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), | 
 |                                       VTList, MemVT, MMO); | 
 |     createOperands(N, Ops); | 
 |   } | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
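/// getLifetimeNode - Create a LIFETIME_START or LIFETIME_END node marking
/// where the stack object for FrameIndex becomes live or dead; Size and
/// Offset describe the covered portion of the object.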
 | SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl, | 
 |                                       SDValue Chain, int FrameIndex, | 
 |                                       int64_t Size, int64_t Offset) { | 
 |   const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END; | 
 |   const auto VTs = getVTList(MVT::Other); | 
 |   SDValue Ops[2] = { | 
 |       Chain, | 
 |       getFrameIndex(FrameIndex, | 
 |                     getTargetLoweringInfo().getFrameIndexTy(getDataLayout()), | 
 |                     true)}; | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opcode, VTs, Ops); | 
 |   ID.AddInteger(FrameIndex); | 
 |   ID.AddInteger(Size); | 
 |   ID.AddInteger(Offset); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   LifetimeSDNode *N = newSDNode<LifetimeSDNode>( | 
 |       Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset); | 
 |   createOperands(N, Ops); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
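/// getPseudoProbeNode - Create a PSEUDO_PROBE node carrying the function
/// GUID, probe index and attributes used by pseudo-probe-based sample
/// profiling.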
 | SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, | 
 |                                          uint64_t Guid, uint64_t Index, | 
 |                                          uint32_t Attr) { | 
 |   const unsigned Opcode = ISD::PSEUDO_PROBE; | 
 |   const auto VTs = getVTList(MVT::Other); | 
 |   SDValue Ops[] = {Chain}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, Opcode, VTs, Ops); | 
 |   ID.AddInteger(Guid); | 
 |   ID.AddInteger(Index); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<PseudoProbeSDNode>( | 
 |       Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr); | 
 |   createOperands(N, Ops); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a | 
 | /// MachinePointerInfo record from it.  This is particularly useful because the | 
 | /// code generator has many cases where it doesn't bother passing in a | 
 | /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". | 
 | static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, | 
 |                                            SelectionDAG &DAG, SDValue Ptr, | 
 |                                            int64_t Offset = 0) { | 
 |   // If this is FI+Offset, we can model it. | 
 |   if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) | 
 |     return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), | 
 |                                              FI->getIndex(), Offset); | 
 |  | 
 |   // If this is (FI+Offset1)+Offset2, we can model it. | 
 |   if (Ptr.getOpcode() != ISD::ADD || | 
 |       !isa<ConstantSDNode>(Ptr.getOperand(1)) || | 
 |       !isa<FrameIndexSDNode>(Ptr.getOperand(0))) | 
 |     return Info; | 
 |  | 
 |   int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); | 
 |   return MachinePointerInfo::getFixedStack( | 
 |       DAG.getMachineFunction(), FI, | 
 |       Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); | 
 | } | 
 |  | 
 | /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a | 
 | /// MachinePointerInfo record from it.  This is particularly useful because the | 
 | /// code generator has many cases where it doesn't bother passing in a | 
 | /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". | 
 | static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info, | 
 |                                            SelectionDAG &DAG, SDValue Ptr, | 
 |                                            SDValue OffsetOp) { | 
  // A constant offset can be folded in; an undef offset is treated as zero.
  // Anything else we can't handle.
 |   if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) | 
 |     return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue()); | 
 |   if (OffsetOp.isUndef()) | 
 |     return InferPointerInfo(Info, DAG, Ptr); | 
 |   return Info; | 
 | } | 
 |  | 
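/// getLoad - Create a load, possibly indexed (per AM) and possibly extending
/// (per ExtType), building a MachineMemOperand from the supplied pointer
/// info, alignment and flags.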
 | SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, | 
 |                               EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                               SDValue Ptr, SDValue Offset, | 
 |                               MachinePointerInfo PtrInfo, EVT MemVT, | 
 |                               Align Alignment, | 
 |                               MachineMemOperand::Flags MMOFlags, | 
 |                               const AAMDNodes &AAInfo, const MDNode *Ranges) { | 
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
 |  | 
 |   MMOFlags |= MachineMemOperand::MOLoad; | 
 |   assert((MMOFlags & MachineMemOperand::MOStore) == 0); | 
 |   // If we don't have a PtrInfo, infer the trivial frame index case to simplify | 
 |   // clients. | 
 |   if (PtrInfo.V.isNull()) | 
 |     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); | 
 |  | 
 |   uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize()); | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, | 
 |                                                    Alignment, AAInfo, Ranges); | 
 |   return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, | 
 |                               EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                               SDValue Ptr, SDValue Offset, EVT MemVT, | 
 |                               MachineMemOperand *MMO) { | 
 |   if (VT == MemVT) { | 
 |     ExtType = ISD::NON_EXTLOAD; | 
 |   } else if (ExtType == ISD::NON_EXTLOAD) { | 
 |     assert(VT == MemVT && "Non-extending load from different memory type!"); | 
 |   } else { | 
 |     // Extending load. | 
 |     assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && | 
 |            "Should only be an extending load, not truncating!"); | 
 |     assert(VT.isInteger() == MemVT.isInteger() && | 
 |            "Cannot convert from FP to Int or Int -> FP!"); | 
 |     assert(VT.isVector() == MemVT.isVector() && | 
 |            "Cannot use an ext load to convert to or from a vector!"); | 
 |     assert((!VT.isVector() || | 
 |             VT.getVectorElementCount() == MemVT.getVectorElementCount()) && | 
 |            "Cannot use an ext load to change the number of vector elements!"); | 
 |   } | 
 |  | 
 |   bool Indexed = AM != ISD::UNINDEXED; | 
 |   assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); | 
 |  | 
  SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
                         : getVTList(VT, MVT::Other);
 |   SDValue Ops[] = { Chain, Ptr, Offset }; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>( | 
 |       dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<LoadSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, | 
 |                                   ExtType, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                               SDValue Ptr, MachinePointerInfo PtrInfo, | 
 |                               MaybeAlign Alignment, | 
 |                               MachineMemOperand::Flags MMOFlags, | 
 |                               const AAMDNodes &AAInfo, const MDNode *Ranges) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, | 
 |                  PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                               SDValue Ptr, MachineMemOperand *MMO) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, | 
 |                  VT, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, | 
 |                                  EVT VT, SDValue Chain, SDValue Ptr, | 
 |                                  MachinePointerInfo PtrInfo, EVT MemVT, | 
 |                                  MaybeAlign Alignment, | 
 |                                  MachineMemOperand::Flags MMOFlags, | 
 |                                  const AAMDNodes &AAInfo) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo, | 
 |                  MemVT, Alignment, MMOFlags, AAInfo); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, | 
 |                                  EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT, | 
 |                                  MachineMemOperand *MMO) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, | 
 |                  MemVT, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, | 
 |                                      SDValue Base, SDValue Offset, | 
 |                                      ISD::MemIndexedMode AM) { | 
 |   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); | 
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
 |   // Don't propagate the invariant or dereferenceable flags. | 
 |   auto MMOFlags = | 
 |       LD->getMemOperand()->getFlags() & | 
 |       ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); | 
 |   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, | 
 |                  LD->getChain(), Base, Offset, LD->getPointerInfo(), | 
 |                  LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo()); | 
 | } | 
 |  | 
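/// getStore - Create an unindexed, non-truncating store of Val to Ptr,
/// building a MachineMemOperand from the supplied pointer info, alignment
/// and flags.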
 | SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, | 
 |                                SDValue Ptr, MachinePointerInfo PtrInfo, | 
 |                                Align Alignment, | 
 |                                MachineMemOperand::Flags MMOFlags, | 
 |                                const AAMDNodes &AAInfo) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |  | 
 |   MMOFlags |= MachineMemOperand::MOStore; | 
 |   assert((MMOFlags & MachineMemOperand::MOLoad) == 0); | 
 |  | 
 |   if (PtrInfo.V.isNull()) | 
 |     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); | 
 |  | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   uint64_t Size = | 
 |       MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize()); | 
 |   MachineMemOperand *MMO = | 
 |       MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo); | 
 |   return getStore(Chain, dl, Val, Ptr, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val, | 
 |                                SDValue Ptr, MachineMemOperand *MMO) { | 
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
 |   EVT VT = Val.getValueType(); | 
 |   SDVTList VTs = getVTList(MVT::Other); | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   SDValue Ops[] = { Chain, Val, Ptr, Undef }; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::STORE, VTs, Ops); | 
 |   ID.AddInteger(VT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( | 
 |       dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<StoreSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, | 
 |                                    ISD::UNINDEXED, false, VT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, | 
 |                                     SDValue Ptr, MachinePointerInfo PtrInfo, | 
 |                                     EVT SVT, Align Alignment, | 
 |                                     MachineMemOperand::Flags MMOFlags, | 
 |                                     const AAMDNodes &AAInfo) { | 
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
 |  | 
 |   MMOFlags |= MachineMemOperand::MOStore; | 
 |   assert((MMOFlags & MachineMemOperand::MOLoad) == 0); | 
 |  | 
 |   if (PtrInfo.V.isNull()) | 
 |     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); | 
 |  | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   MachineMemOperand *MMO = MF.getMachineMemOperand( | 
 |       PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()), | 
 |       Alignment, AAInfo); | 
 |   return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, | 
 |                                     SDValue Ptr, EVT SVT, | 
 |                                     MachineMemOperand *MMO) { | 
 |   EVT VT = Val.getValueType(); | 
 |  | 
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
 |   if (VT == SVT) | 
 |     return getStore(Chain, dl, Val, Ptr, MMO); | 
 |  | 
 |   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && | 
 |          "Should only be a truncating store, not extending!"); | 
 |   assert(VT.isInteger() == SVT.isInteger() && | 
 |          "Can't do FP-INT conversion!"); | 
 |   assert(VT.isVector() == SVT.isVector() && | 
 |          "Cannot use trunc store to convert to or from a vector!"); | 
 |   assert((!VT.isVector() || | 
 |           VT.getVectorElementCount() == SVT.getVectorElementCount()) && | 
 |          "Cannot use trunc store to change the number of vector elements!"); | 
 |  | 
 |   SDVTList VTs = getVTList(MVT::Other); | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   SDValue Ops[] = { Chain, Val, Ptr, Undef }; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::STORE, VTs, Ops); | 
 |   ID.AddInteger(SVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>( | 
 |       dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<StoreSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, | 
 |                                    ISD::UNINDEXED, true, SVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl, | 
 |                                       SDValue Base, SDValue Offset, | 
 |                                       ISD::MemIndexedMode AM) { | 
 |   StoreSDNode *ST = cast<StoreSDNode>(OrigStore); | 
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
 |   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); | 
 |   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::STORE, VTs, Ops); | 
 |   ID.AddInteger(ST->getMemoryVT().getRawBits()); | 
 |   ID.AddInteger(ST->getRawSubclassData()); | 
 |   ID.AddInteger(ST->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(ST->getMemOperand()->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, | 
 |                                    ST->isTruncatingStore(), ST->getMemoryVT(), | 
 |                                    ST->getMemOperand()); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
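/// getLoadVP - Create a vector-predicated (VP) load. Mask disables
/// individual lanes and EVL gives the explicit vector length: only lanes
/// below EVL whose mask bit is set are loaded.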
 | SDValue SelectionDAG::getLoadVP( | 
 |     ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, | 
 |     SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, | 
 |     MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, | 
 |     MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, | 
 |     const MDNode *Ranges, bool IsExpanding) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |  | 
 |   MMOFlags |= MachineMemOperand::MOLoad; | 
 |   assert((MMOFlags & MachineMemOperand::MOStore) == 0); | 
 |   // If we don't have a PtrInfo, infer the trivial frame index case to simplify | 
 |   // clients. | 
 |   if (PtrInfo.V.isNull()) | 
 |     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); | 
 |  | 
 |   uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize()); | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, | 
 |                                                    Alignment, AAInfo, Ranges); | 
 |   return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT, | 
 |                    MMO, IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getLoadVP(ISD::MemIndexedMode AM, | 
 |                                 ISD::LoadExtType ExtType, EVT VT, | 
 |                                 const SDLoc &dl, SDValue Chain, SDValue Ptr, | 
 |                                 SDValue Offset, SDValue Mask, SDValue EVL, | 
 |                                 EVT MemVT, MachineMemOperand *MMO, | 
 |                                 bool IsExpanding) { | 
 |   bool Indexed = AM != ISD::UNINDEXED; | 
 |   assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); | 
 |  | 
 |   SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other) | 
 |                          : getVTList(VT, MVT::Other); | 
 |   SDValue Ops[] = {Chain, Ptr, Offset, Mask, EVL}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::VP_LOAD, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>( | 
 |       dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<VPLoadSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<VPLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, | 
 |                                     ExtType, IsExpanding, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                                 SDValue Ptr, SDValue Mask, SDValue EVL, | 
 |                                 MachinePointerInfo PtrInfo, | 
 |                                 MaybeAlign Alignment, | 
 |                                 MachineMemOperand::Flags MMOFlags, | 
 |                                 const AAMDNodes &AAInfo, const MDNode *Ranges, | 
 |                                 bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, | 
 |                    Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges, | 
 |                    IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                                 SDValue Ptr, SDValue Mask, SDValue EVL, | 
 |                                 MachineMemOperand *MMO, bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef, | 
 |                    Mask, EVL, VT, MMO, IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, | 
 |                                    EVT VT, SDValue Chain, SDValue Ptr, | 
 |                                    SDValue Mask, SDValue EVL, | 
 |                                    MachinePointerInfo PtrInfo, EVT MemVT, | 
 |                                    MaybeAlign Alignment, | 
 |                                    MachineMemOperand::Flags MMOFlags, | 
 |                                    const AAMDNodes &AAInfo, bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask, | 
 |                    EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr, | 
 |                    IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, | 
 |                                    EVT VT, SDValue Chain, SDValue Ptr, | 
 |                                    SDValue Mask, SDValue EVL, EVT MemVT, | 
 |                                    MachineMemOperand *MMO, bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask, | 
 |                    EVL, MemVT, MMO, IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, | 
 |                                        SDValue Base, SDValue Offset, | 
 |                                        ISD::MemIndexedMode AM) { | 
 |   auto *LD = cast<VPLoadSDNode>(OrigLoad); | 
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
 |   // Don't propagate the invariant or dereferenceable flags. | 
 |   auto MMOFlags = | 
 |       LD->getMemOperand()->getFlags() & | 
 |       ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); | 
 |   return getLoadVP(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, | 
 |                    LD->getChain(), Base, Offset, LD->getMask(), | 
 |                    LD->getVectorLength(), LD->getPointerInfo(), | 
 |                    LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(), | 
 |                    nullptr, LD->isExpandingLoad()); | 
 | } | 
 |  | 
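/// getStoreVP - Create a vector-predicated store; as with VP loads, only
/// lanes below EVL whose mask bit is set are written.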
 | SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, | 
 |                                  SDValue Ptr, SDValue Offset, SDValue Mask, | 
 |                                  SDValue EVL, EVT MemVT, MachineMemOperand *MMO, | 
 |                                  ISD::MemIndexedMode AM, bool IsTruncating, | 
 |                                  bool IsCompressing) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |   bool Indexed = AM != ISD::UNINDEXED; | 
 |   assert((Indexed || Offset.isUndef()) && "Unindexed vp_store with an offset!"); | 
 |   SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other) | 
 |                          : getVTList(MVT::Other); | 
 |   SDValue Ops[] = {Chain, Val, Ptr, Offset, Mask, EVL}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>( | 
 |       dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<VPStoreSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, | 
 |                                      IsTruncating, IsCompressing, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl, | 
 |                                       SDValue Val, SDValue Ptr, SDValue Mask, | 
 |                                       SDValue EVL, MachinePointerInfo PtrInfo, | 
 |                                       EVT SVT, Align Alignment, | 
 |                                       MachineMemOperand::Flags MMOFlags, | 
 |                                       const AAMDNodes &AAInfo, | 
 |                                       bool IsCompressing) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |  | 
 |   MMOFlags |= MachineMemOperand::MOStore; | 
 |   assert((MMOFlags & MachineMemOperand::MOLoad) == 0); | 
 |  | 
 |   if (PtrInfo.V.isNull()) | 
 |     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); | 
 |  | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   MachineMemOperand *MMO = MF.getMachineMemOperand( | 
 |       PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()), | 
 |       Alignment, AAInfo); | 
 |   return getTruncStoreVP(Chain, dl, Val, Ptr, Mask, EVL, SVT, MMO, | 
 |                          IsCompressing); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl, | 
 |                                       SDValue Val, SDValue Ptr, SDValue Mask, | 
 |                                       SDValue EVL, EVT SVT, | 
 |                                       MachineMemOperand *MMO, | 
 |                                       bool IsCompressing) { | 
 |   EVT VT = Val.getValueType(); | 
 |  | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |   if (VT == SVT) | 
 |     return getStoreVP(Chain, dl, Val, Ptr, getUNDEF(Ptr.getValueType()), Mask, | 
 |                       EVL, VT, MMO, ISD::UNINDEXED, | 
 |                       /*IsTruncating*/ false, IsCompressing); | 
 |  | 
 |   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && | 
 |          "Should only be a truncating store, not extending!"); | 
 |   assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!"); | 
 |   assert(VT.isVector() == SVT.isVector() && | 
 |          "Cannot use trunc store to convert to or from a vector!"); | 
 |   assert((!VT.isVector() || | 
 |           VT.getVectorElementCount() == SVT.getVectorElementCount()) && | 
 |          "Cannot use trunc store to change the number of vector elements!"); | 
 |  | 
 |   SDVTList VTs = getVTList(MVT::Other); | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops); | 
 |   ID.AddInteger(SVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>( | 
 |       dl.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<VPStoreSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = | 
 |       newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, | 
 |                                ISD::UNINDEXED, true, IsCompressing, SVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, | 
 |                                         SDValue Base, SDValue Offset, | 
 |                                         ISD::MemIndexedMode AM) { | 
 |   auto *ST = cast<VPStoreSDNode>(OrigStore); | 
 |   assert(ST->getOffset().isUndef() && "Store is already an indexed store!"); | 
 |   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); | 
 |   SDValue Ops[] = {ST->getChain(), ST->getValue(), Base, | 
 |                    Offset,         ST->getMask(),  ST->getVectorLength()}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops); | 
 |   ID.AddInteger(ST->getMemoryVT().getRawBits()); | 
 |   ID.AddInteger(ST->getRawSubclassData()); | 
 |   ID.AddInteger(ST->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(ST->getMemOperand()->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<VPStoreSDNode>( | 
 |       dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ST->isTruncatingStore(), | 
 |       ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand()); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
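/// getStridedLoadVP - Create an experimental VP strided load, which reads
/// element i from memory address Ptr + i * Stride (Stride is in bytes).
/// Since the memory footprint depends on the runtime stride, the memory
/// operand is conservatively given an unknown size.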
 | SDValue SelectionDAG::getStridedLoadVP( | 
 |     ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, | 
 |     SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, | 
 |     SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, | 
 |     MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, | 
 |     const MDNode *Ranges, bool IsExpanding) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |  | 
 |   MMOFlags |= MachineMemOperand::MOLoad; | 
 |   assert((MMOFlags & MachineMemOperand::MOStore) == 0); | 
 |   // If we don't have a PtrInfo, infer the trivial frame index case to simplify | 
 |   // clients. | 
 |   if (PtrInfo.V.isNull()) | 
 |     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset); | 
 |  | 
 |   uint64_t Size = MemoryLocation::UnknownSize; | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, | 
 |                                                    Alignment, AAInfo, Ranges); | 
 |   return getStridedLoadVP(AM, ExtType, VT, DL, Chain, Ptr, Offset, Stride, Mask, | 
 |                           EVL, MemVT, MMO, IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getStridedLoadVP( | 
 |     ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, | 
 |     SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, | 
 |     SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding) { | 
 |   bool Indexed = AM != ISD::UNINDEXED; | 
 |   assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!"); | 
 |  | 
 |   SDValue Ops[] = {Chain, Ptr, Offset, Stride, Mask, EVL}; | 
 |   SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other) | 
 |                          : getVTList(VT, MVT::Other); | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_LOAD, VTs, Ops); | 
 |   ID.AddInteger(VT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPStridedLoadSDNode>( | 
 |       DL.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |  | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { | 
 |     cast<VPStridedLoadSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |  | 
 |   auto *N = | 
 |       newSDNode<VPStridedLoadSDNode>(DL.getIROrder(), DL.getDebugLoc(), VTs, AM, | 
 |                                      ExtType, IsExpanding, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getStridedLoadVP( | 
 |     EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Stride, | 
 |     SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, MaybeAlign Alignment, | 
 |     MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, | 
 |     const MDNode *Ranges, bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getStridedLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, DL, Chain, Ptr, | 
 |                           Undef, Stride, Mask, EVL, PtrInfo, VT, Alignment, | 
 |                           MMOFlags, AAInfo, Ranges, IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain, | 
 |                                        SDValue Ptr, SDValue Stride, | 
 |                                        SDValue Mask, SDValue EVL, | 
 |                                        MachineMemOperand *MMO, | 
 |                                        bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getStridedLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, DL, Chain, Ptr, | 
 |                           Undef, Stride, Mask, EVL, VT, MMO, IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getExtStridedLoadVP( | 
 |     ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, | 
 |     SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, | 
 |     MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment, | 
 |     MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, | 
 |     bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getStridedLoadVP(ISD::UNINDEXED, ExtType, VT, DL, Chain, Ptr, Undef, | 
 |                           Stride, Mask, EVL, PtrInfo, MemVT, Alignment, | 
 |                           MMOFlags, AAInfo, nullptr, IsExpanding); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getExtStridedLoadVP( | 
 |     ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain, | 
 |     SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, | 
 |     MachineMemOperand *MMO, bool IsExpanding) { | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   return getStridedLoadVP(ISD::UNINDEXED, ExtType, VT, DL, Chain, Ptr, Undef, | 
 |                           Stride, Mask, EVL, MemVT, MMO, IsExpanding); | 
 | } | 
 |  | 
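/// getIndexedStridedLoadVP - Convert an unindexed strided VP load into a
/// pre- or post-indexed load using the given base and offset. The invariant
/// and dereferenceable MMO flags are intentionally not propagated, since
/// they may not hold for the rewritten address.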
 | SDValue SelectionDAG::getIndexedStridedLoadVP(SDValue OrigLoad, const SDLoc &DL, | 
 |                                               SDValue Base, SDValue Offset, | 
 |                                               ISD::MemIndexedMode AM) { | 
 |   auto *SLD = cast<VPStridedLoadSDNode>(OrigLoad); | 
  assert(SLD->getOffset().isUndef() &&
         "Strided load is already an indexed load!");
 |   // Don't propagate the invariant or dereferenceable flags. | 
 |   auto MMOFlags = | 
 |       SLD->getMemOperand()->getFlags() & | 
 |       ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable); | 
 |   return getStridedLoadVP( | 
 |       AM, SLD->getExtensionType(), OrigLoad.getValueType(), DL, SLD->getChain(), | 
 |       Base, Offset, SLD->getStride(), SLD->getMask(), SLD->getVectorLength(), | 
 |       SLD->getPointerInfo(), SLD->getMemoryVT(), SLD->getAlign(), MMOFlags, | 
 |       SLD->getAAInfo(), nullptr, SLD->isExpandingLoad()); | 
 | } | 
 |  | 
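/// getStridedStoreVP - Return an EXPERIMENTAL_VP_STRIDED_STORE node. Indexed
/// stores also produce the updated pointer, so they carry an extra result of
/// the pointer type.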
 | SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL, | 
 |                                         SDValue Val, SDValue Ptr, | 
 |                                         SDValue Offset, SDValue Stride, | 
 |                                         SDValue Mask, SDValue EVL, EVT MemVT, | 
 |                                         MachineMemOperand *MMO, | 
 |                                         ISD::MemIndexedMode AM, | 
 |                                         bool IsTruncating, bool IsCompressing) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |   bool Indexed = AM != ISD::UNINDEXED; | 
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed strided store with an offset!");
 |   SDVTList VTs = Indexed ? getVTList(Ptr.getValueType(), MVT::Other) | 
 |                          : getVTList(MVT::Other); | 
 |   SDValue Ops[] = {Chain, Val, Ptr, Offset, Stride, Mask, EVL}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>( | 
 |       DL.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { | 
 |     cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(), | 
 |                                             VTs, AM, IsTruncating, | 
 |                                             IsCompressing, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
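/// getTruncStridedStoreVP - Return a truncating
/// EXPERIMENTAL_VP_STRIDED_STORE node built from a MachinePointerInfo. This
/// overload constructs the MachineMemOperand and forwards to the MMO-based
/// overload below.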
 | SDValue SelectionDAG::getTruncStridedStoreVP( | 
 |     SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride, | 
 |     SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT, | 
 |     Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, | 
 |     bool IsCompressing) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |  | 
 |   MMOFlags |= MachineMemOperand::MOStore; | 
 |   assert((MMOFlags & MachineMemOperand::MOLoad) == 0); | 
 |  | 
 |   if (PtrInfo.V.isNull()) | 
 |     PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr); | 
 |  | 
 |   MachineFunction &MF = getMachineFunction(); | 
 |   MachineMemOperand *MMO = MF.getMachineMemOperand( | 
 |       PtrInfo, MMOFlags, MemoryLocation::UnknownSize, Alignment, AAInfo); | 
 |   return getTruncStridedStoreVP(Chain, DL, Val, Ptr, Stride, Mask, EVL, SVT, | 
 |                                 MMO, IsCompressing); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, | 
 |                                              SDValue Val, SDValue Ptr, | 
 |                                              SDValue Stride, SDValue Mask, | 
 |                                              SDValue EVL, EVT SVT, | 
 |                                              MachineMemOperand *MMO, | 
 |                                              bool IsCompressing) { | 
 |   EVT VT = Val.getValueType(); | 
 |  | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |   if (VT == SVT) | 
 |     return getStridedStoreVP(Chain, DL, Val, Ptr, getUNDEF(Ptr.getValueType()), | 
 |                              Stride, Mask, EVL, VT, MMO, ISD::UNINDEXED, | 
 |                              /*IsTruncating*/ false, IsCompressing); | 
 |  | 
 |   assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && | 
 |          "Should only be a truncating store, not extending!"); | 
 |   assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!"); | 
 |   assert(VT.isVector() == SVT.isVector() && | 
 |          "Cannot use trunc store to convert to or from a vector!"); | 
 |   assert((!VT.isVector() || | 
 |           VT.getVectorElementCount() == SVT.getVectorElementCount()) && | 
 |          "Cannot use trunc store to change the number of vector elements!"); | 
 |  | 
 |   SDVTList VTs = getVTList(MVT::Other); | 
 |   SDValue Undef = getUNDEF(Ptr.getValueType()); | 
 |   SDValue Ops[] = {Chain, Val, Ptr, Undef, Stride, Mask, EVL}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops); | 
 |   ID.AddInteger(SVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPStridedStoreSDNode>( | 
 |       DL.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { | 
 |     cast<VPStridedStoreSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<VPStridedStoreSDNode>(DL.getIROrder(), DL.getDebugLoc(), | 
 |                                             VTs, ISD::UNINDEXED, true, | 
 |                                             IsCompressing, SVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
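/// getIndexedStridedStoreVP - Convert an unindexed strided VP store into a
/// pre- or post-indexed store using the given base and offset, reusing the
/// original node's memory operand and subclass data.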
 | SDValue SelectionDAG::getIndexedStridedStoreVP(SDValue OrigStore, | 
 |                                                const SDLoc &DL, SDValue Base, | 
 |                                                SDValue Offset, | 
 |                                                ISD::MemIndexedMode AM) { | 
 |   auto *SST = cast<VPStridedStoreSDNode>(OrigStore); | 
 |   assert(SST->getOffset().isUndef() && | 
 |          "Strided store is already an indexed store!"); | 
 |   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); | 
 |   SDValue Ops[] = { | 
 |       SST->getChain(), SST->getValue(),       Base, Offset, SST->getStride(), | 
 |       SST->getMask(),  SST->getVectorLength()}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::EXPERIMENTAL_VP_STRIDED_STORE, VTs, Ops); | 
 |   ID.AddInteger(SST->getMemoryVT().getRawBits()); | 
 |   ID.AddInteger(SST->getRawSubclassData()); | 
 |   ID.AddInteger(SST->getPointerInfo().getAddrSpace()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<VPStridedStoreSDNode>( | 
 |       DL.getIROrder(), DL.getDebugLoc(), VTs, AM, SST->isTruncatingStore(), | 
 |       SST->isCompressingStore(), SST->getMemoryVT(), SST->getMemOperand()); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
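/// getGatherVP - Return a VP_GATHER node for the six given operands. The
/// asserts below check that the mask and index widths are consistent with
/// the result type and that the scale is a constant power of 2.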
 | SDValue SelectionDAG::getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, | 
 |                                   ArrayRef<SDValue> Ops, MachineMemOperand *MMO, | 
 |                                   ISD::MemIndexType IndexType) { | 
 |   assert(Ops.size() == 6 && "Incompatible number of operands"); | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::VP_GATHER, VTs, Ops); | 
 |   ID.AddInteger(VT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>( | 
 |       dl.getIROrder(), VTs, VT, MMO, IndexType)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<VPGatherSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |  | 
 |   auto *N = newSDNode<VPGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, | 
 |                                       VT, MMO, IndexType); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   assert(N->getMask().getValueType().getVectorElementCount() == | 
 |              N->getValueType(0).getVectorElementCount() && | 
 |          "Vector width mismatch between mask and data"); | 
 |   assert(N->getIndex().getValueType().getVectorElementCount().isScalable() == | 
 |              N->getValueType(0).getVectorElementCount().isScalable() && | 
 |          "Scalable flags of index and data do not match"); | 
 |   assert(ElementCount::isKnownGE( | 
 |              N->getIndex().getValueType().getVectorElementCount(), | 
 |              N->getValueType(0).getVectorElementCount()) && | 
 |          "Vector width mismatch between index and data"); | 
 |   assert(isa<ConstantSDNode>(N->getScale()) && | 
 |          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && | 
 |          "Scale should be a constant power of 2"); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
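/// getScatterVP - Return a VP_SCATTER node for the seven given operands. The
/// asserts below check that the mask and index widths are consistent with
/// the stored value type and that the scale is a constant power of 2.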
 | SDValue SelectionDAG::getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, | 
 |                                    ArrayRef<SDValue> Ops, | 
 |                                    MachineMemOperand *MMO, | 
 |                                    ISD::MemIndexType IndexType) { | 
 |   assert(Ops.size() == 7 && "Incompatible number of operands"); | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::VP_SCATTER, VTs, Ops); | 
 |   ID.AddInteger(VT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>( | 
 |       dl.getIROrder(), VTs, VT, MMO, IndexType)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<VPScatterSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<VPScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, | 
 |                                        VT, MMO, IndexType); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   assert(N->getMask().getValueType().getVectorElementCount() == | 
 |              N->getValue().getValueType().getVectorElementCount() && | 
 |          "Vector width mismatch between mask and data"); | 
 |   assert( | 
 |       N->getIndex().getValueType().getVectorElementCount().isScalable() == | 
 |           N->getValue().getValueType().getVectorElementCount().isScalable() && | 
 |       "Scalable flags of index and data do not match"); | 
 |   assert(ElementCount::isKnownGE( | 
 |              N->getIndex().getValueType().getVectorElementCount(), | 
 |              N->getValue().getValueType().getVectorElementCount()) && | 
 |          "Vector width mismatch between index and data"); | 
 |   assert(isa<ConstantSDNode>(N->getScale()) && | 
 |          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && | 
 |          "Scale should be a constant power of 2"); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
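/// getMaskedLoad - Return an MLOAD node. Indexed loads additionally produce
/// the updated base pointer as a second result.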
 | SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                                     SDValue Base, SDValue Offset, SDValue Mask, | 
 |                                     SDValue PassThru, EVT MemVT, | 
 |                                     MachineMemOperand *MMO, | 
 |                                     ISD::MemIndexedMode AM, | 
 |                                     ISD::LoadExtType ExtTy, bool isExpanding) { | 
 |   bool Indexed = AM != ISD::UNINDEXED; | 
 |   assert((Indexed || Offset.isUndef()) && | 
 |          "Unindexed masked load with an offset!"); | 
 |   SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other) | 
 |                          : getVTList(VT, MVT::Other); | 
 |   SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>( | 
 |       dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<MaskedLoadSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, | 
 |                                         AM, ExtTy, isExpanding, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, | 
 |                                            SDValue Base, SDValue Offset, | 
 |                                            ISD::MemIndexedMode AM) { | 
 |   MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad); | 
  assert(LD->getOffset().isUndef() &&
         "Masked load is already an indexed load!");
 |   return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base, | 
 |                        Offset, LD->getMask(), LD->getPassThru(), | 
 |                        LD->getMemoryVT(), LD->getMemOperand(), AM, | 
 |                        LD->getExtensionType(), LD->isExpandingLoad()); | 
 | } | 
 |  | 
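/// getMaskedStore - Return an MSTORE node. The truncating and compressing
/// properties are encoded in the node's subclass data and participate in CSE.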
 | SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl, | 
 |                                      SDValue Val, SDValue Base, SDValue Offset, | 
 |                                      SDValue Mask, EVT MemVT, | 
 |                                      MachineMemOperand *MMO, | 
 |                                      ISD::MemIndexedMode AM, bool IsTruncating, | 
 |                                      bool IsCompressing) { | 
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
 |   bool Indexed = AM != ISD::UNINDEXED; | 
 |   assert((Indexed || Offset.isUndef()) && | 
 |          "Unindexed masked store with an offset!"); | 
 |   SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other) | 
 |                          : getVTList(MVT::Other); | 
 |   SDValue Ops[] = {Chain, Val, Base, Offset, Mask}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>( | 
 |       dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<MaskedStoreSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |   auto *N = | 
 |       newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM, | 
 |                                    IsTruncating, IsCompressing, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl, | 
 |                                             SDValue Base, SDValue Offset, | 
 |                                             ISD::MemIndexedMode AM) { | 
 |   MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore); | 
  assert(ST->getOffset().isUndef() &&
         "Masked store is already an indexed store!");
 |   return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset, | 
 |                         ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(), | 
 |                         AM, ST->isTruncatingStore(), ST->isCompressingStore()); | 
 | } | 
 |  | 
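/// getMaskedGather - Return an MGATHER node. The asserts below check that
/// the pass-through value matches the result type and that the mask, index,
/// and result widths are consistent.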
 | SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, | 
 |                                       ArrayRef<SDValue> Ops, | 
 |                                       MachineMemOperand *MMO, | 
 |                                       ISD::MemIndexType IndexType, | 
 |                                       ISD::LoadExtType ExtTy) { | 
 |   assert(Ops.size() == 6 && "Incompatible number of operands"); | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>( | 
 |       dl.getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<MaskedGatherSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |  | 
 |   auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), | 
 |                                           VTs, MemVT, MMO, IndexType, ExtTy); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   assert(N->getPassThru().getValueType() == N->getValueType(0) && | 
 |          "Incompatible type of the PassThru value in MaskedGatherSDNode"); | 
 |   assert(N->getMask().getValueType().getVectorElementCount() == | 
 |              N->getValueType(0).getVectorElementCount() && | 
 |          "Vector width mismatch between mask and data"); | 
 |   assert(N->getIndex().getValueType().getVectorElementCount().isScalable() == | 
 |              N->getValueType(0).getVectorElementCount().isScalable() && | 
 |          "Scalable flags of index and data do not match"); | 
 |   assert(ElementCount::isKnownGE( | 
 |              N->getIndex().getValueType().getVectorElementCount(), | 
 |              N->getValueType(0).getVectorElementCount()) && | 
 |          "Vector width mismatch between index and data"); | 
 |   assert(isa<ConstantSDNode>(N->getScale()) && | 
 |          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && | 
 |          "Scale should be a constant power of 2"); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
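/// getMaskedScatter - Return an MSCATTER node. The asserts below check that
/// the mask and index widths are consistent with the stored value type and
/// that the scale is a constant power of 2.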
 | SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, | 
 |                                        ArrayRef<SDValue> Ops, | 
 |                                        MachineMemOperand *MMO, | 
 |                                        ISD::MemIndexType IndexType, | 
 |                                        bool IsTrunc) { | 
 |   assert(Ops.size() == 6 && "Incompatible number of operands"); | 
 |  | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>( | 
 |       dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) { | 
 |     cast<MaskedScatterSDNode>(E)->refineAlignment(MMO); | 
 |     return SDValue(E, 0); | 
 |   } | 
 |  | 
 |   auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), | 
 |                                            VTs, MemVT, MMO, IndexType, IsTrunc); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   assert(N->getMask().getValueType().getVectorElementCount() == | 
 |              N->getValue().getValueType().getVectorElementCount() && | 
 |          "Vector width mismatch between mask and data"); | 
 |   assert( | 
 |       N->getIndex().getValueType().getVectorElementCount().isScalable() == | 
 |           N->getValue().getValueType().getVectorElementCount().isScalable() && | 
 |       "Scalable flags of index and data do not match"); | 
 |   assert(ElementCount::isKnownGE( | 
 |              N->getIndex().getValueType().getVectorElementCount(), | 
 |              N->getValue().getValueType().getVectorElementCount()) && | 
 |          "Vector width mismatch between index and data"); | 
 |   assert(isa<ConstantSDNode>(N->getScale()) && | 
 |          cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() && | 
 |          "Scale should be a constant power of 2"); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
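/// getGetFPEnv - Return a GET_FPENV_MEM node that saves the current
/// floating-point environment to the memory pointed to by Ptr.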
 | SDValue SelectionDAG::getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, | 
 |                                   EVT MemVT, MachineMemOperand *MMO) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |   SDVTList VTs = getVTList(MVT::Other); | 
 |   SDValue Ops[] = {Chain, Ptr}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::GET_FPENV_MEM, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>( | 
 |       ISD::GET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<FPStateAccessSDNode>(ISD::GET_FPENV_MEM, dl.getIROrder(), | 
 |                                            dl.getDebugLoc(), VTs, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
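/// getSetFPEnv - Return a SET_FPENV_MEM node that loads a new floating-point
/// environment from the memory pointed to by Ptr.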
 | SDValue SelectionDAG::getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, | 
 |                                   EVT MemVT, MachineMemOperand *MMO) { | 
 |   assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); | 
 |   SDVTList VTs = getVTList(MVT::Other); | 
 |   SDValue Ops[] = {Chain, Ptr}; | 
 |   FoldingSetNodeID ID; | 
 |   AddNodeIDNode(ID, ISD::SET_FPENV_MEM, VTs, Ops); | 
 |   ID.AddInteger(MemVT.getRawBits()); | 
 |   ID.AddInteger(getSyntheticNodeSubclassData<FPStateAccessSDNode>( | 
 |       ISD::SET_FPENV_MEM, dl.getIROrder(), VTs, MemVT, MMO)); | 
 |   ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); | 
 |   ID.AddInteger(MMO->getFlags()); | 
 |   void *IP = nullptr; | 
 |   if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) | 
 |     return SDValue(E, 0); | 
 |  | 
 |   auto *N = newSDNode<FPStateAccessSDNode>(ISD::SET_FPENV_MEM, dl.getIROrder(), | 
 |                                            dl.getDebugLoc(), VTs, MemVT, MMO); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   CSEMap.InsertNode(N, IP); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
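/// simplifySelect - Try to simplify a select with the given condition and
/// arms, folding to one arm when the condition is constant or an operand is
/// undef. Returns an empty SDValue if no fold applies.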
 | SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) { | 
 |   // select undef, T, F --> T (if T is a constant), otherwise F | 
  // select ?, undef, F --> F
  // select ?, T, undef --> T
 |   if (Cond.isUndef()) | 
 |     return isConstantValueOfAnyType(T) ? T : F; | 
 |   if (T.isUndef()) | 
 |     return F; | 
 |   if (F.isUndef()) | 
 |     return T; | 
 |  | 
 |   // select true, T, F --> T | 
 |   // select false, T, F --> F | 
 |   if (auto *CondC = dyn_cast<ConstantSDNode>(Cond)) | 
 |     return CondC->isZero() ? F : T; | 
 |  | 
  // TODO: This should simplify VSELECT with a non-zero constant condition
  // using something like this (but check boolean contents to be complete?):
 |   if (ConstantSDNode *CondC = isConstOrConstSplat(Cond, /*AllowUndefs*/ false, | 
 |                                                   /*AllowTruncation*/ true)) | 
 |     if (CondC->isZero()) | 
 |       return F; | 
 |  | 
 |   // select ?, T, T --> T | 
 |   if (T == F) | 
 |     return T; | 
 |  | 
 |   return SDValue(); | 
 | } | 
 |  | 
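/// simplifyShift - Try to simplify a shift where an operand is undef or
/// zero, or where the shift amount is known to be at least the bitwidth.
/// Returns an empty SDValue if no fold applies.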
 | SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) { | 
 |   // shift undef, Y --> 0 (can always assume that the undef value is 0) | 
 |   if (X.isUndef()) | 
 |     return getConstant(0, SDLoc(X.getNode()), X.getValueType()); | 
 |   // shift X, undef --> undef (because it may shift by the bitwidth) | 
 |   if (Y.isUndef()) | 
 |     return getUNDEF(X.getValueType()); | 
 |  | 
 |   // shift 0, Y --> 0 | 
 |   // shift X, 0 --> X | 
 |   if (isNullOrNullSplat(X) || isNullOrNullSplat(Y)) | 
 |     return X; | 
 |  | 
 |   // shift X, C >= bitwidth(X) --> undef | 
 |   // All vector elements must be too big (or undef) to avoid partial undefs. | 
 |   auto isShiftTooBig = [X](ConstantSDNode *Val) { | 
 |     return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits()); | 
 |   }; | 
 |   if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true)) | 
 |     return getUNDEF(X.getValueType()); | 
 |  | 
 |   return SDValue(); | 
 | } | 
 |  | 
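/// simplifyFPBinop - Try to simplify a floating-point binary operator using
/// its fast-math flags and any constant or undef operands. Returns an empty
/// SDValue if no fold applies.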
 | SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y, | 
 |                                       SDNodeFlags Flags) { | 
  // If this operation has 'nnan' or 'ninf' and at least one disallowed
  // operand (an undef operand can be chosen to be NaN/Inf), then the result
  // of this operation is poison. That result can be relaxed to undef.
 |   ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true); | 
 |   ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true); | 
 |   bool HasNan = (XC && XC->getValueAPF().isNaN()) || | 
 |                 (YC && YC->getValueAPF().isNaN()); | 
 |   bool HasInf = (XC && XC->getValueAPF().isInfinity()) || | 
 |                 (YC && YC->getValueAPF().isInfinity()); | 
 |  | 
 |   if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef())) | 
 |     return getUNDEF(X.getValueType()); | 
 |  | 
 |   if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef())) | 
 |     return getUNDEF(X.getValueType()); | 
 |  | 
 |   if (!YC) | 
 |     return SDValue(); | 
 |  | 
 |   // X + -0.0 --> X | 
 |   if (Opcode == ISD::FADD) | 
 |     if (YC->getValueAPF().isNegZero()) | 
 |       return X; | 
 |  | 
 |   // X - +0.0 --> X | 
 |   if (Opcode == ISD::FSUB) | 
 |     if (YC->getValueAPF().isPosZero()) | 
 |       return X; | 
 |  | 
 |   // X * 1.0 --> X | 
 |   // X / 1.0 --> X | 
 |   if (Opcode == ISD::FMUL || Opcode == ISD::FDIV) | 
 |     if (YC->getValueAPF().isExactlyValue(1.0)) | 
 |       return X; | 
 |  | 
 |   // X * 0.0 --> 0.0 | 
 |   if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros()) | 
 |     if (YC->getValueAPF().isZero()) | 
 |       return getConstantFP(0.0, SDLoc(Y), Y.getValueType()); | 
 |  | 
 |   return SDValue(); | 
 | } | 
 |  | 
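/// getVAArg - Return a VAARG node that reads a value of type VT from the
/// va_list at Ptr. SV is the IR pointer operand and Align is the alignment,
/// passed through as a target constant.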
 | SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, | 
 |                                SDValue Ptr, SDValue SV, unsigned Align) { | 
 |   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) }; | 
 |   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               ArrayRef<SDUse> Ops) { | 
 |   switch (Ops.size()) { | 
 |   case 0: return getNode(Opcode, DL, VT); | 
 |   case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0])); | 
 |   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]); | 
 |   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]); | 
 |   default: break; | 
 |   } | 
 |  | 
 |   // Copy from an SDUse array into an SDValue array for use with | 
 |   // the regular getNode logic. | 
 |   SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end()); | 
 |   return getNode(Opcode, DL, VT, NewOps); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               ArrayRef<SDValue> Ops) { | 
 |   SDNodeFlags Flags; | 
 |   if (Inserter) | 
 |     Flags = Inserter->getFlags(); | 
 |   return getNode(Opcode, DL, VT, Ops, Flags); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT, | 
 |                               ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { | 
 |   unsigned NumOps = Ops.size(); | 
 |   switch (NumOps) { | 
 |   case 0: return getNode(Opcode, DL, VT); | 
 |   case 1: return getNode(Opcode, DL, VT, Ops[0], Flags); | 
 |   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags); | 
 |   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags); | 
 |   default: break; | 
 |   } | 
 |  | 
 | #ifndef NDEBUG | 
 |   for (const auto &Op : Ops) | 
 |     assert(Op.getOpcode() != ISD::DELETED_NODE && | 
 |            "Operand is DELETED_NODE!"); | 
 | #endif | 
 |  | 
 |   switch (Opcode) { | 
 |   default: break; | 
 |   case ISD::BUILD_VECTOR: | 
 |     // Attempt to simplify BUILD_VECTOR. | 
 |     if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this)) | 
 |       return V; | 
 |     break; | 
 |   case ISD::CONCAT_VECTORS: | 
 |     if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this)) | 
 |       return V; | 
 |     break; | 
 |   case ISD::SELECT_CC: | 
 |     assert(NumOps == 5 && "SELECT_CC takes 5 operands!"); | 
 |     assert(Ops[0].getValueType() == Ops[1].getValueType() && | 
 |            "LHS and RHS of condition must have same type!"); | 
 |     assert(Ops[2].getValueType() == Ops[3].getValueType() && | 
 |            "True and False arms of SelectCC must have same type!"); | 
 |     assert(Ops[2].getValueType() == VT && | 
 |            "select_cc node must be of same type as true and false value!"); | 
 |     assert((!Ops[0].getValueType().isVector() || | 
 |             Ops[0].getValueType().getVectorElementCount() == | 
 |                 VT.getVectorElementCount()) && | 
 |            "Expected select_cc with vector result to have the same sized " | 
 |            "comparison type!"); | 
 |     break; | 
 |   case ISD::BR_CC: | 
 |     assert(NumOps == 5 && "BR_CC takes 5 operands!"); | 
 |     assert(Ops[2].getValueType() == Ops[3].getValueType() && | 
 |            "LHS/RHS of comparison should match types!"); | 
 |     break; | 
 |   case ISD::VP_ADD: | 
 |   case ISD::VP_SUB: | 
    // If this is a VP_ADD/VP_SUB mask operation, turn it into VP_XOR.
 |     if (VT.isVector() && VT.getVectorElementType() == MVT::i1) | 
 |       Opcode = ISD::VP_XOR; | 
 |     break; | 
 |   case ISD::VP_MUL: | 
    // If this is a VP_MUL mask operation, turn it into VP_AND.
 |     if (VT.isVector() && VT.getVectorElementType() == MVT::i1) | 
 |       Opcode = ISD::VP_AND; | 
 |     break; | 
 |   case ISD::VP_REDUCE_MUL: | 
    // If this is a VP_REDUCE_MUL mask operation, turn it into VP_REDUCE_AND.
 |     if (VT == MVT::i1) | 
 |       Opcode = ISD::VP_REDUCE_AND; | 
 |     break; | 
 |   case ISD::VP_REDUCE_ADD: | 
    // If this is a VP_REDUCE_ADD mask operation, turn it into VP_REDUCE_XOR.
 |     if (VT == MVT::i1) | 
 |       Opcode = ISD::VP_REDUCE_XOR; | 
 |     break; | 
 |   case ISD::VP_REDUCE_SMAX: | 
 |   case ISD::VP_REDUCE_UMIN: | 
    // If this is a VP_REDUCE_SMAX/VP_REDUCE_UMIN mask operation, turn it
    // into VP_REDUCE_AND.
 |     if (VT == MVT::i1) | 
 |       Opcode = ISD::VP_REDUCE_AND; | 
 |     break; | 
 |   case ISD::VP_REDUCE_SMIN: | 
 |   case ISD::VP_REDUCE_UMAX: | 
    // If this is a VP_REDUCE_SMIN/VP_REDUCE_UMAX mask operation, turn it
    // into VP_REDUCE_OR.
 |     if (VT == MVT::i1) | 
 |       Opcode = ISD::VP_REDUCE_OR; | 
 |     break; | 
 |   } | 
 |  | 
 |   // Memoize nodes. | 
 |   SDNode *N; | 
 |   SDVTList VTs = getVTList(VT); | 
 |  | 
 |   if (VT != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTs, Ops); | 
 |     void *IP = nullptr; | 
 |  | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) | 
 |       return SDValue(E, 0); | 
 |  | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     createOperands(N, Ops); | 
 |  | 
 |     CSEMap.InsertNode(N, IP); | 
 |   } else { | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |     createOperands(N, Ops); | 
 |   } | 
 |  | 
 |   N->setFlags(Flags); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, | 
 |                               ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) { | 
 |   return getNode(Opcode, DL, getVTList(ResultTys), Ops); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, | 
 |                               ArrayRef<SDValue> Ops) { | 
 |   SDNodeFlags Flags; | 
 |   if (Inserter) | 
 |     Flags = Inserter->getFlags(); | 
 |   return getNode(Opcode, DL, VTList, Ops, Flags); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, | 
 |                               ArrayRef<SDValue> Ops, const SDNodeFlags Flags) { | 
 |   if (VTList.NumVTs == 1) | 
 |     return getNode(Opcode, DL, VTList.VTs[0], Ops, Flags); | 
 |  | 
 | #ifndef NDEBUG | 
 |   for (const auto &Op : Ops) | 
 |     assert(Op.getOpcode() != ISD::DELETED_NODE && | 
 |            "Operand is DELETED_NODE!"); | 
 | #endif | 
 |  | 
 |   switch (Opcode) { | 
 |   case ISD::SADDO: | 
 |   case ISD::UADDO: | 
 |   case ISD::SSUBO: | 
 |   case ISD::USUBO: { | 
 |     assert(VTList.NumVTs == 2 && Ops.size() == 2 && | 
 |            "Invalid add/sub overflow op!"); | 
 |     assert(VTList.VTs[0].isInteger() && VTList.VTs[1].isInteger() && | 
 |            Ops[0].getValueType() == Ops[1].getValueType() && | 
 |            Ops[0].getValueType() == VTList.VTs[0] && | 
 |            "Binary operator types must match!"); | 
 |     SDValue N1 = Ops[0], N2 = Ops[1]; | 
 |     canonicalizeCommutativeBinop(Opcode, N1, N2); | 
 |  | 
    // (X +- 0) -> X, with a zero overflow result.
 |     ConstantSDNode *N2CV = isConstOrConstSplat(N2, /*AllowUndefs*/ false, | 
 |                                                /*AllowTruncation*/ true); | 
 |     if (N2CV && N2CV->isZero()) { | 
 |       SDValue ZeroOverFlow = getConstant(0, DL, VTList.VTs[1]); | 
 |       return getNode(ISD::MERGE_VALUES, DL, VTList, {N1, ZeroOverFlow}, Flags); | 
 |     } | 
 |  | 
 |     if (VTList.VTs[0].isVector() && | 
 |         VTList.VTs[0].getVectorElementType() == MVT::i1 && | 
 |         VTList.VTs[1].getVectorElementType() == MVT::i1) { | 
 |       SDValue F1 = getFreeze(N1); | 
 |       SDValue F2 = getFreeze(N2); | 
      // {vXi1,vXi1} (u/s)addo(vXi1 x, vXi1 y) -> {xor(x,y),and(x,y)}
 |       if (Opcode == ISD::UADDO || Opcode == ISD::SADDO) | 
 |         return getNode(ISD::MERGE_VALUES, DL, VTList, | 
 |                        {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2), | 
 |                         getNode(ISD::AND, DL, VTList.VTs[1], F1, F2)}, | 
 |                        Flags); | 
      // {vXi1,vXi1} (u/s)subo(vXi1 x, vXi1 y) -> {xor(x,y),and(~x,y)}
 |       if (Opcode == ISD::USUBO || Opcode == ISD::SSUBO) { | 
 |         SDValue NotF1 = getNOT(DL, F1, VTList.VTs[0]); | 
 |         return getNode(ISD::MERGE_VALUES, DL, VTList, | 
 |                        {getNode(ISD::XOR, DL, VTList.VTs[0], F1, F2), | 
 |                         getNode(ISD::AND, DL, VTList.VTs[1], NotF1, F2)}, | 
 |                        Flags); | 
 |       } | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::SMUL_LOHI: | 
 |   case ISD::UMUL_LOHI: { | 
 |     assert(VTList.NumVTs == 2 && Ops.size() == 2 && "Invalid mul lo/hi op!"); | 
 |     assert(VTList.VTs[0].isInteger() && VTList.VTs[0] == VTList.VTs[1] && | 
 |            VTList.VTs[0] == Ops[0].getValueType() && | 
 |            VTList.VTs[0] == Ops[1].getValueType() && | 
 |            "Binary operator types must match!"); | 
 |     // Constant fold. | 
 |     ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(Ops[0]); | 
 |     ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ops[1]); | 
 |     if (LHS && RHS) { | 
 |       unsigned Width = VTList.VTs[0].getScalarSizeInBits(); | 
 |       unsigned OutWidth = Width * 2; | 
 |       APInt Val = LHS->getAPIntValue(); | 
 |       APInt Mul = RHS->getAPIntValue(); | 
 |       if (Opcode == ISD::SMUL_LOHI) { | 
 |         Val = Val.sext(OutWidth); | 
 |         Mul = Mul.sext(OutWidth); | 
 |       } else { | 
 |         Val = Val.zext(OutWidth); | 
 |         Mul = Mul.zext(OutWidth); | 
 |       } | 
 |       Val *= Mul; | 
 |  | 
 |       SDValue Hi = | 
 |           getConstant(Val.extractBits(Width, Width), DL, VTList.VTs[0]); | 
 |       SDValue Lo = getConstant(Val.trunc(Width), DL, VTList.VTs[0]); | 
 |       return getNode(ISD::MERGE_VALUES, DL, VTList, {Lo, Hi}, Flags); | 
 |     } | 
 |     break; | 
 |   } | 
 |   case ISD::FFREXP: { | 
 |     assert(VTList.NumVTs == 2 && Ops.size() == 1 && "Invalid ffrexp op!"); | 
 |     assert(VTList.VTs[0].isFloatingPoint() && VTList.VTs[1].isInteger() && | 
 |            VTList.VTs[0] == Ops[0].getValueType() && "frexp type mismatch"); | 
 |  | 
 |     if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Ops[0])) { | 
 |       int FrexpExp; | 
 |       APFloat FrexpMant = | 
 |           frexp(C->getValueAPF(), FrexpExp, APFloat::rmNearestTiesToEven); | 
 |       SDValue Result0 = getConstantFP(FrexpMant, DL, VTList.VTs[0]); | 
 |       SDValue Result1 = | 
 |           getConstant(FrexpMant.isFinite() ? FrexpExp : 0, DL, VTList.VTs[1]); | 
 |       return getNode(ISD::MERGE_VALUES, DL, VTList, {Result0, Result1}, Flags); | 
 |     } | 
 |  | 
 |     break; | 
 |   } | 
 |   case ISD::STRICT_FP_EXTEND: | 
 |     assert(VTList.NumVTs == 2 && Ops.size() == 2 && | 
 |            "Invalid STRICT_FP_EXTEND!"); | 
 |     assert(VTList.VTs[0].isFloatingPoint() && | 
 |            Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!"); | 
 |     assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && | 
 |            "STRICT_FP_EXTEND result type should be vector iff the operand " | 
 |            "type is vector!"); | 
 |     assert((!VTList.VTs[0].isVector() || | 
 |             VTList.VTs[0].getVectorElementCount() == | 
 |                 Ops[1].getValueType().getVectorElementCount()) && | 
 |            "Vector element count mismatch!"); | 
 |     assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) && | 
 |            "Invalid fpext node, dst <= src!"); | 
 |     break; | 
 |   case ISD::STRICT_FP_ROUND: | 
 |     assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!"); | 
 |     assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() && | 
 |            "STRICT_FP_ROUND result type should be vector iff the operand " | 
 |            "type is vector!"); | 
 |     assert((!VTList.VTs[0].isVector() || | 
 |             VTList.VTs[0].getVectorElementCount() == | 
 |                 Ops[1].getValueType().getVectorElementCount()) && | 
 |            "Vector element count mismatch!"); | 
 |     assert(VTList.VTs[0].isFloatingPoint() && | 
 |            Ops[1].getValueType().isFloatingPoint() && | 
 |            VTList.VTs[0].bitsLT(Ops[1].getValueType()) && | 
 |            isa<ConstantSDNode>(Ops[2]) && | 
 |            (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 || | 
 |             cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) && | 
 |            "Invalid STRICT_FP_ROUND!"); | 
 |     break; | 
 | #if 0 | 
 |   // FIXME: figure out how to safely handle things like | 
 |   // int foo(int x) { return 1 << (x & 255); } | 
 |   // int bar() { return foo(256); } | 
 |   case ISD::SRA_PARTS: | 
 |   case ISD::SRL_PARTS: | 
 |   case ISD::SHL_PARTS: | 
 |     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG && | 
 |         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1) | 
 |       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); | 
 |     else if (N3.getOpcode() == ISD::AND) | 
 |       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) { | 
        // If the and is only masking out bits that cannot affect the shift,
 |         // eliminate the and. | 
 |         unsigned NumBits = VT.getScalarSizeInBits()*2; | 
 |         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1) | 
 |           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0)); | 
 |       } | 
 |     break; | 
 | #endif | 
 |   } | 
 |  | 
 |   // Memoize the node unless it returns a glue result. | 
 |   SDNode *N; | 
 |   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTList, Ops); | 
 |     void *IP = nullptr; | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) | 
 |       return SDValue(E, 0); | 
 |  | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); | 
 |     createOperands(N, Ops); | 
 |     CSEMap.InsertNode(N, IP); | 
 |   } else { | 
 |     N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList); | 
 |     createOperands(N, Ops); | 
 |   } | 
 |  | 
 |   N->setFlags(Flags); | 
 |   InsertNode(N); | 
 |   SDValue V(N, 0); | 
 |   NewSDValueDbgMsg(V, "Creating new node: ", this); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, | 
 |                               SDVTList VTList) { | 
 |   return getNode(Opcode, DL, VTList, std::nullopt); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, | 
 |                               SDValue N1) { | 
 |   SDValue Ops[] = { N1 }; | 
 |   return getNode(Opcode, DL, VTList, Ops); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, | 
 |                               SDValue N1, SDValue N2) { | 
 |   SDValue Ops[] = { N1, N2 }; | 
 |   return getNode(Opcode, DL, VTList, Ops); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, | 
 |                               SDValue N1, SDValue N2, SDValue N3) { | 
 |   SDValue Ops[] = { N1, N2, N3 }; | 
 |   return getNode(Opcode, DL, VTList, Ops); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, | 
 |                               SDValue N1, SDValue N2, SDValue N3, SDValue N4) { | 
 |   SDValue Ops[] = { N1, N2, N3, N4 }; | 
 |   return getNode(Opcode, DL, VTList, Ops); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, | 
 |                               SDValue N1, SDValue N2, SDValue N3, SDValue N4, | 
 |                               SDValue N5) { | 
 |   SDValue Ops[] = { N1, N2, N3, N4, N5 }; | 
 |   return getNode(Opcode, DL, VTList, Ops); | 
 | } | 
 |  | 
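/// getVTList - Return an SDVTList covering the given value types. Multi-type
/// lists are uniqued in VTListMap so that identical lists share one
/// allocation.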
 | SDVTList SelectionDAG::getVTList(EVT VT) { | 
 |   return makeVTList(SDNode::getValueTypeList(VT), 1); | 
 | } | 
 |  | 
 | SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { | 
 |   FoldingSetNodeID ID; | 
 |   ID.AddInteger(2U); | 
 |   ID.AddInteger(VT1.getRawBits()); | 
 |   ID.AddInteger(VT2.getRawBits()); | 
 |  | 
 |   void *IP = nullptr; | 
 |   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); | 
 |   if (!Result) { | 
 |     EVT *Array = Allocator.Allocate<EVT>(2); | 
 |     Array[0] = VT1; | 
 |     Array[1] = VT2; | 
 |     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2); | 
 |     VTListMap.InsertNode(Result, IP); | 
 |   } | 
 |   return Result->getSDVTList(); | 
 | } | 
 |  | 
 | SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { | 
 |   FoldingSetNodeID ID; | 
 |   ID.AddInteger(3U); | 
 |   ID.AddInteger(VT1.getRawBits()); | 
 |   ID.AddInteger(VT2.getRawBits()); | 
 |   ID.AddInteger(VT3.getRawBits()); | 
 |  | 
 |   void *IP = nullptr; | 
 |   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); | 
 |   if (!Result) { | 
 |     EVT *Array = Allocator.Allocate<EVT>(3); | 
 |     Array[0] = VT1; | 
 |     Array[1] = VT2; | 
 |     Array[2] = VT3; | 
 |     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3); | 
 |     VTListMap.InsertNode(Result, IP); | 
 |   } | 
 |   return Result->getSDVTList(); | 
 | } | 
 |  | 
 | SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { | 
 |   FoldingSetNodeID ID; | 
 |   ID.AddInteger(4U); | 
 |   ID.AddInteger(VT1.getRawBits()); | 
 |   ID.AddInteger(VT2.getRawBits()); | 
 |   ID.AddInteger(VT3.getRawBits()); | 
 |   ID.AddInteger(VT4.getRawBits()); | 
 |  | 
 |   void *IP = nullptr; | 
 |   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); | 
 |   if (!Result) { | 
 |     EVT *Array = Allocator.Allocate<EVT>(4); | 
 |     Array[0] = VT1; | 
 |     Array[1] = VT2; | 
 |     Array[2] = VT3; | 
 |     Array[3] = VT4; | 
 |     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4); | 
 |     VTListMap.InsertNode(Result, IP); | 
 |   } | 
 |   return Result->getSDVTList(); | 
 | } | 
 |  | 
 | SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) { | 
 |   unsigned NumVTs = VTs.size(); | 
 |   FoldingSetNodeID ID; | 
 |   ID.AddInteger(NumVTs); | 
 |   for (unsigned index = 0; index < NumVTs; index++) { | 
 |     ID.AddInteger(VTs[index].getRawBits()); | 
 |   } | 
 |  | 
 |   void *IP = nullptr; | 
 |   SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP); | 
 |   if (!Result) { | 
 |     EVT *Array = Allocator.Allocate<EVT>(NumVTs); | 
 |     llvm::copy(VTs, Array); | 
 |     Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs); | 
 |     VTListMap.InsertNode(Result, IP); | 
 |   } | 
 |   return Result->getSDVTList(); | 
 | } | 
 |  | 
 |  | 
 | /// UpdateNodeOperands - *Mutate* the specified node in-place to have the | 
 | /// specified operands.  If the resultant node already exists in the DAG, | 
 | /// this does not modify the specified node, instead it returns the node that | 
 | /// already exists.  If the resultant node does not exist in the DAG, the | 
 | /// input node is returned.  As a degenerate case, if you specify the same | 
 | /// input operands as the node already has, the input node is returned. | 
 | SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { | 
 |   assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); | 
 |  | 
 |   // Check to see if there is no change. | 
 |   if (Op == N->getOperand(0)) return N; | 
 |  | 
 |   // See if the modified node already exists. | 
 |   void *InsertPos = nullptr; | 
 |   if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) | 
 |     return Existing; | 
 |  | 
 |   // Nope it doesn't.  Remove the node from its current place in the maps. | 
 |   if (InsertPos) | 
 |     if (!RemoveNodeFromCSEMaps(N)) | 
 |       InsertPos = nullptr; | 
 |  | 
 |   // Now we update the operands. | 
 |   N->OperandList[0].set(Op); | 
 |  | 
 |   updateDivergence(N); | 
 |   // If this gets put into a CSE map, add it. | 
 |   if (InsertPos) CSEMap.InsertNode(N, InsertPos); | 
 |   return N; | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { | 
 |   assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); | 
 |  | 
 |   // Check to see if there is no change. | 
 |   if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) | 
 |     return N;   // No operands changed, just return the input node. | 
 |  | 
 |   // See if the modified node already exists. | 
 |   void *InsertPos = nullptr; | 
 |   if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) | 
 |     return Existing; | 
 |  | 
 |   // Nope it doesn't.  Remove the node from its current place in the maps. | 
 |   if (InsertPos) | 
 |     if (!RemoveNodeFromCSEMaps(N)) | 
 |       InsertPos = nullptr; | 
 |  | 
 |   // Now we update the operands. | 
 |   if (N->OperandList[0] != Op1) | 
 |     N->OperandList[0].set(Op1); | 
 |   if (N->OperandList[1] != Op2) | 
 |     N->OperandList[1].set(Op2); | 
 |  | 
 |   updateDivergence(N); | 
 |   // If this gets put into a CSE map, add it. | 
 |   if (InsertPos) CSEMap.InsertNode(N, InsertPos); | 
 |   return N; | 
 | } | 
 |  | 
 | SDNode *SelectionDAG:: | 
 | UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { | 
 |   SDValue Ops[] = { Op1, Op2, Op3 }; | 
 |   return UpdateNodeOperands(N, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG:: | 
 | UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, | 
 |                    SDValue Op3, SDValue Op4) { | 
 |   SDValue Ops[] = { Op1, Op2, Op3, Op4 }; | 
 |   return UpdateNodeOperands(N, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG:: | 
 | UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, | 
 |                    SDValue Op3, SDValue Op4, SDValue Op5) { | 
 |   SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; | 
 |   return UpdateNodeOperands(N, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG:: | 
 | UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) { | 
 |   unsigned NumOps = Ops.size(); | 
 |   assert(N->getNumOperands() == NumOps && | 
 |          "Update with wrong number of operands"); | 
 |  | 
 |   // If no operands changed just return the input node. | 
 |   if (std::equal(Ops.begin(), Ops.end(), N->op_begin())) | 
 |     return N; | 
 |  | 
 |   // See if the modified node already exists. | 
 |   void *InsertPos = nullptr; | 
 |   if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos)) | 
 |     return Existing; | 
 |  | 
 |   // Nope it doesn't.  Remove the node from its current place in the maps. | 
 |   if (InsertPos) | 
 |     if (!RemoveNodeFromCSEMaps(N)) | 
 |       InsertPos = nullptr; | 
 |  | 
 |   // Now we update the operands. | 
 |   for (unsigned i = 0; i != NumOps; ++i) | 
 |     if (N->OperandList[i] != Ops[i]) | 
 |       N->OperandList[i].set(Ops[i]); | 
 |  | 
 |   updateDivergence(N); | 
 |   // If this gets put into a CSE map, add it. | 
 |   if (InsertPos) CSEMap.InsertNode(N, InsertPos); | 
 |   return N; | 
 | } | 
 |  | 
 | /// DropOperands - Release the operands and set this node to have | 
 | /// zero operands. | 
 | void SDNode::DropOperands() { | 
 |   // Unlike the code in MorphNodeTo that does this, we don't need to | 
 |   // watch for dead nodes here. | 
 |   for (op_iterator I = op_begin(), E = op_end(); I != E; ) { | 
 |     SDUse &Use = *I++; | 
 |     Use.set(SDValue()); | 
 |   } | 
 | } | 
 |  | 
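/// setNodeMemRefs - Assign NewMemRefs as the memory references of node N. A
/// single reference is stored inline; longer arrays are copied into memory
/// owned by the DAG's allocator.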
 | void SelectionDAG::setNodeMemRefs(MachineSDNode *N, | 
 |                                   ArrayRef<MachineMemOperand *> NewMemRefs) { | 
 |   if (NewMemRefs.empty()) { | 
 |     N->clearMemRefs(); | 
 |     return; | 
 |   } | 
 |  | 
 |   // Check if we can avoid allocating by storing a single reference directly. | 
 |   if (NewMemRefs.size() == 1) { | 
 |     N->MemRefs = NewMemRefs[0]; | 
 |     N->NumMemRefs = 1; | 
 |     return; | 
 |   } | 
 |  | 
 |   MachineMemOperand **MemRefsBuffer = | 
 |       Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size()); | 
 |   llvm::copy(NewMemRefs, MemRefsBuffer); | 
 |   N->MemRefs = MemRefsBuffer; | 
 |   N->NumMemRefs = static_cast<int>(NewMemRefs.size()); | 
 | } | 
 |  | 
 | /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a | 
 | /// machine opcode. | 
 | /// | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   return SelectNodeTo(N, MachineOpc, VTs, std::nullopt); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT, SDValue Op1) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = { Op1 }; | 
 |   return SelectNodeTo(N, MachineOpc, VTs, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT, SDValue Op1, | 
 |                                    SDValue Op2) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = { Op1, Op2 }; | 
 |   return SelectNodeTo(N, MachineOpc, VTs, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT, SDValue Op1, | 
 |                                    SDValue Op2, SDValue Op3) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = { Op1, Op2, Op3 }; | 
 |   return SelectNodeTo(N, MachineOpc, VTs, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT, ArrayRef<SDValue> Ops) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   return SelectNodeTo(N, MachineOpc, VTs, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) { | 
 |   SDVTList VTs = getVTList(VT1, VT2); | 
 |   return SelectNodeTo(N, MachineOpc, VTs, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT1, EVT VT2) { | 
 |   SDVTList VTs = getVTList(VT1, VT2); | 
 |   return SelectNodeTo(N, MachineOpc, VTs, std::nullopt); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT1, EVT VT2, EVT VT3, | 
 |                                    ArrayRef<SDValue> Ops) { | 
 |   SDVTList VTs = getVTList(VT1, VT2, VT3); | 
 |   return SelectNodeTo(N, MachineOpc, VTs, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
 |                                    EVT VT1, EVT VT2, | 
 |                                    SDValue Op1, SDValue Op2) { | 
 |   SDVTList VTs = getVTList(VT1, VT2); | 
 |   SDValue Ops[] = { Op1, Op2 }; | 
 |   return SelectNodeTo(N, MachineOpc, VTs, Ops); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, | 
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
 |   SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops); | 
 |   // Reset the NodeID to -1. | 
 |   New->setNodeId(-1); | 
 |   if (New != N) { | 
 |     ReplaceAllUsesWith(N, New); | 
 |     RemoveDeadNode(N); | 
 |   } | 
 |   return New; | 
 | } | 
 |  | 
/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that the operation is associated with multiple
/// lines. This makes the debugger work better at -O0, where there is a higher
/// probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
 | SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) { | 
 |   DebugLoc NLoc = N->getDebugLoc(); | 
 |   if (NLoc && OptLevel == CodeGenOptLevel::None && OLoc.getDebugLoc() != NLoc) { | 
 |     N->setDebugLoc(DebugLoc()); | 
 |   } | 
 |   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder()); | 
 |   N->setIROrder(Order); | 
 |   return N; | 
 | } | 
 |  | 
 | /// MorphNodeTo - This *mutates* the specified node to have the specified | 
 | /// return type, opcode, and operands. | 
 | /// | 
 | /// Note that MorphNodeTo returns the resultant node.  If there is already a | 
 | /// node of the specified opcode and operands, it returns that node instead of | 
 | /// the current one.  Note that the SDLoc need not be the same. | 
 | /// | 
 | /// Using MorphNodeTo is faster than creating a new node and swapping it in | 
 | /// with ReplaceAllUsesWith both because it often avoids allocating a new | 
 | /// node, and because it doesn't require CSE recalculation for any of | 
 | /// the node's users. | 
 | /// | 
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner or
/// the legalizer, which maintain worklists that would need to be updated when
/// deleting things.
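///
/// Callers must therefore be prepared for either outcome. A minimal sketch of
/// the usual pattern (this mirrors what SelectNodeTo does above):
/// \code
///   SDNode *New = DAG.MorphNodeTo(N, Opc, VTs, Ops);
///   if (New != N) {
///     // CSE found an equivalent node; retire the original.
///     DAG.ReplaceAllUsesWith(N, New);
///     DAG.RemoveDeadNode(N);
///   }
/// \endcode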
 | SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, | 
 |                                   SDVTList VTs, ArrayRef<SDValue> Ops) { | 
 |   // If an identical node already exists, use it. | 
 |   void *IP = nullptr; | 
 |   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opc, VTs, Ops); | 
 |     if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP)) | 
 |       return UpdateSDLocOnMergeSDNode(ON, SDLoc(N)); | 
 |   } | 
 |  | 
 |   if (!RemoveNodeFromCSEMaps(N)) | 
 |     IP = nullptr; | 
 |  | 
 |   // Start the morphing. | 
 |   N->NodeType = Opc; | 
 |   N->ValueList = VTs.VTs; | 
 |   N->NumValues = VTs.NumVTs; | 
 |  | 
 |   // Clear the operands list, updating used nodes to remove this from their | 
 |   // use list.  Keep track of any operands that become dead as a result. | 
 |   SmallPtrSet<SDNode*, 16> DeadNodeSet; | 
 |   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) { | 
 |     SDUse &Use = *I++; | 
 |     SDNode *Used = Use.getNode(); | 
 |     Use.set(SDValue()); | 
 |     if (Used->use_empty()) | 
 |       DeadNodeSet.insert(Used); | 
 |   } | 
 |  | 
  // For MachineNodes, clear the (now stale) memory reference information.
 |   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) | 
 |     MN->clearMemRefs(); | 
 |  | 
 |   // Swap for an appropriately sized array from the recycler. | 
 |   removeOperands(N); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   // Delete any nodes that are still dead after adding the uses for the | 
 |   // new operands. | 
 |   if (!DeadNodeSet.empty()) { | 
 |     SmallVector<SDNode *, 16> DeadNodes; | 
 |     for (SDNode *N : DeadNodeSet) | 
 |       if (N->use_empty()) | 
 |         DeadNodes.push_back(N); | 
 |     RemoveDeadNodes(DeadNodes); | 
 |   } | 
 |  | 
 |   if (IP) | 
 |     CSEMap.InsertNode(N, IP);   // Memoize the new node. | 
 |   return N; | 
 | } | 
 |  | 
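/// Mutate the specified strict FP node to its non-strict equivalent,
/// unlinking the node from its chain and dropping the chain result.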
SDNode *SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
 |   unsigned OrigOpc = Node->getOpcode(); | 
 |   unsigned NewOpc; | 
 |   switch (OrigOpc) { | 
 |   default: | 
 |     llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!"); | 
 | #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \ | 
 |   case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break; | 
 | #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \ | 
 |   case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break; | 
 | #include "llvm/IR/ConstrainedOps.def" | 
 |   } | 
 |  | 
 |   assert(Node->getNumValues() == 2 && "Unexpected number of results!"); | 
 |  | 
 |   // We're taking this node out of the chain, so we need to re-link things. | 
 |   SDValue InputChain = Node->getOperand(0); | 
 |   SDValue OutputChain = SDValue(Node, 1); | 
 |   ReplaceAllUsesOfValueWith(OutputChain, InputChain); | 
 |  | 
 |   SmallVector<SDValue, 3> Ops; | 
 |   for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) | 
 |     Ops.push_back(Node->getOperand(i)); | 
 |  | 
 |   SDVTList VTs = getVTList(Node->getValueType(0)); | 
 |   SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops); | 
 |  | 
 |   // MorphNodeTo can operate in two ways: if an existing node with the | 
 |   // specified operands exists, it can just return it.  Otherwise, it | 
 |   // updates the node in place to have the requested operands. | 
 |   if (Res == Node) { | 
 |     // If we updated the node in place, reset the node ID.  To the isel, | 
 |     // this should be just like a newly allocated machine node. | 
 |     Res->setNodeId(-1); | 
 |   } else { | 
 |     ReplaceAllUsesWith(Node, Res); | 
 |     RemoveDeadNode(Node); | 
 |   } | 
 |  | 
 |   return Res; | 
 | } | 
 |  | 
/// getMachineNode - These are used by target selectors to create a new node
 | /// with specified return type(s), MachineInstr opcode, and operands. | 
 | /// | 
 | /// Note that getMachineNode returns the resultant node.  If there is already a | 
 | /// node of the specified opcode and operands, it returns that node instead of | 
 | /// the current one. | 
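///
/// For example, creating an operand-less machine node with a single i32
/// result (the opcode here is illustrative):
/// \code
///   MachineSDNode *MN =
///       CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32);
/// \endcode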
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   return getMachineNode(Opcode, dl, VTs, std::nullopt); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT, SDValue Op1) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = { Op1 }; | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT, SDValue Op1, SDValue Op2) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = { Op1, Op2 }; | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT, SDValue Op1, SDValue Op2, | 
 |                                             SDValue Op3) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   SDValue Ops[] = { Op1, Op2, Op3 }; | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT, ArrayRef<SDValue> Ops) { | 
 |   SDVTList VTs = getVTList(VT); | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT1, EVT VT2, SDValue Op1, | 
 |                                             SDValue Op2) { | 
 |   SDVTList VTs = getVTList(VT1, VT2); | 
 |   SDValue Ops[] = { Op1, Op2 }; | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT1, EVT VT2, SDValue Op1, | 
 |                                             SDValue Op2, SDValue Op3) { | 
 |   SDVTList VTs = getVTList(VT1, VT2); | 
 |   SDValue Ops[] = { Op1, Op2, Op3 }; | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT1, EVT VT2, | 
 |                                             ArrayRef<SDValue> Ops) { | 
 |   SDVTList VTs = getVTList(VT1, VT2); | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT1, EVT VT2, EVT VT3, | 
 |                                             SDValue Op1, SDValue Op2) { | 
 |   SDVTList VTs = getVTList(VT1, VT2, VT3); | 
 |   SDValue Ops[] = { Op1, Op2 }; | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT1, EVT VT2, EVT VT3, | 
 |                                             SDValue Op1, SDValue Op2, | 
 |                                             SDValue Op3) { | 
 |   SDVTList VTs = getVTList(VT1, VT2, VT3); | 
 |   SDValue Ops[] = { Op1, Op2, Op3 }; | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             EVT VT1, EVT VT2, EVT VT3, | 
 |                                             ArrayRef<SDValue> Ops) { | 
 |   SDVTList VTs = getVTList(VT1, VT2, VT3); | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl, | 
 |                                             ArrayRef<EVT> ResultTys, | 
 |                                             ArrayRef<SDValue> Ops) { | 
 |   SDVTList VTs = getVTList(ResultTys); | 
 |   return getMachineNode(Opcode, dl, VTs, Ops); | 
 | } | 
 |  | 
 | MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL, | 
 |                                             SDVTList VTs, | 
 |                                             ArrayRef<SDValue> Ops) { | 
 |   bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; | 
 |   MachineSDNode *N; | 
 |   void *IP = nullptr; | 
 |  | 
 |   if (DoCSE) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, ~Opcode, VTs, Ops); | 
 |     IP = nullptr; | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) { | 
 |       return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL)); | 
 |     } | 
 |   } | 
 |  | 
 |   // Allocate a new MachineSDNode. | 
 |   N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); | 
 |   createOperands(N, Ops); | 
 |  | 
 |   if (DoCSE) | 
 |     CSEMap.InsertNode(N, IP); | 
 |  | 
 |   InsertNode(N); | 
 |   NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this); | 
 |   return N; | 
 | } | 
 |  | 
 | /// getTargetExtractSubreg - A convenience function for creating | 
 | /// TargetOpcode::EXTRACT_SUBREG nodes. | 
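///
/// A minimal usage sketch, where SubIdx is a target-defined subregister index
/// and the other names are illustrative:
/// \code
///   SDValue Lo = DAG.getTargetExtractSubreg(SubIdx, DL, MVT::i32, Reg64);
/// \endcode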
 | SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, | 
 |                                              SDValue Operand) { | 
 |   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); | 
 |   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, | 
 |                                   VT, Operand, SRIdxVal); | 
 |   return SDValue(Subreg, 0); | 
 | } | 
 |  | 
 | /// getTargetInsertSubreg - A convenience function for creating | 
 | /// TargetOpcode::INSERT_SUBREG nodes. | 
 | SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, | 
 |                                             SDValue Operand, SDValue Subreg) { | 
 |   SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32); | 
 |   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL, | 
 |                                   VT, Operand, Subreg, SRIdxVal); | 
 |   return SDValue(Result, 0); | 
 | } | 
 |  | 
 | /// getNodeIfExists - Get the specified node if it's already available, or | 
 | /// else return NULL. | 
 | SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, | 
 |                                       ArrayRef<SDValue> Ops) { | 
 |   SDNodeFlags Flags; | 
 |   if (Inserter) | 
 |     Flags = Inserter->getFlags(); | 
 |   return getNodeIfExists(Opcode, VTList, Ops, Flags); | 
 | } | 
 |  | 
 | SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, | 
 |                                       ArrayRef<SDValue> Ops, | 
 |                                       const SDNodeFlags Flags) { | 
 |   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTList, Ops); | 
 |     void *IP = nullptr; | 
 |     if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) { | 
 |       E->intersectFlagsWith(Flags); | 
 |       return E; | 
 |     } | 
 |   } | 
 |   return nullptr; | 
 | } | 
 |  | 
 | /// doesNodeExist - Check if a node exists without modifying its flags. | 
 | bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList, | 
 |                                  ArrayRef<SDValue> Ops) { | 
 |   if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) { | 
 |     FoldingSetNodeID ID; | 
 |     AddNodeIDNode(ID, Opcode, VTList, Ops); | 
 |     void *IP = nullptr; | 
 |     if (FindNodeOrInsertPos(ID, SDLoc(), IP)) | 
 |       return true; | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
/// getDbgValue - Creates an SDDbgValue node.
 | /// | 
 | /// SDNode | 
 | SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr, | 
 |                                       SDNode *N, unsigned R, bool IsIndirect, | 
 |                                       const DebugLoc &DL, unsigned O) { | 
 |   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && | 
 |          "Expected inlined-at fields to agree"); | 
 |   return new (DbgInfo->getAlloc()) | 
 |       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R), | 
 |                  {}, IsIndirect, DL, O, | 
 |                  /*IsVariadic=*/false); | 
 | } | 
 |  | 
 | /// Constant | 
 | SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var, | 
 |                                               DIExpression *Expr, | 
 |                                               const Value *C, | 
 |                                               const DebugLoc &DL, unsigned O) { | 
 |   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && | 
 |          "Expected inlined-at fields to agree"); | 
 |   return new (DbgInfo->getAlloc()) | 
 |       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {}, | 
 |                  /*IsIndirect=*/false, DL, O, | 
 |                  /*IsVariadic=*/false); | 
 | } | 
 |  | 
 | /// FrameIndex | 
 | SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, | 
 |                                                 DIExpression *Expr, unsigned FI, | 
 |                                                 bool IsIndirect, | 
 |                                                 const DebugLoc &DL, | 
 |                                                 unsigned O) { | 
 |   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && | 
 |          "Expected inlined-at fields to agree"); | 
 |   return getFrameIndexDbgValue(Var, Expr, FI, {}, IsIndirect, DL, O); | 
 | } | 
 |  | 
 | /// FrameIndex with dependencies | 
 | SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var, | 
 |                                                 DIExpression *Expr, unsigned FI, | 
 |                                                 ArrayRef<SDNode *> Dependencies, | 
 |                                                 bool IsIndirect, | 
 |                                                 const DebugLoc &DL, | 
 |                                                 unsigned O) { | 
 |   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && | 
 |          "Expected inlined-at fields to agree"); | 
 |   return new (DbgInfo->getAlloc()) | 
 |       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI), | 
 |                  Dependencies, IsIndirect, DL, O, | 
 |                  /*IsVariadic=*/false); | 
 | } | 
 |  | 
 | /// VReg | 
 | SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, DIExpression *Expr, | 
 |                                           unsigned VReg, bool IsIndirect, | 
 |                                           const DebugLoc &DL, unsigned O) { | 
 |   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && | 
 |          "Expected inlined-at fields to agree"); | 
 |   return new (DbgInfo->getAlloc()) | 
 |       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg), | 
 |                  {}, IsIndirect, DL, O, | 
 |                  /*IsVariadic=*/false); | 
 | } | 
 |  | 
 | SDDbgValue *SelectionDAG::getDbgValueList(DIVariable *Var, DIExpression *Expr, | 
 |                                           ArrayRef<SDDbgOperand> Locs, | 
 |                                           ArrayRef<SDNode *> Dependencies, | 
 |                                           bool IsIndirect, const DebugLoc &DL, | 
 |                                           unsigned O, bool IsVariadic) { | 
 |   assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) && | 
 |          "Expected inlined-at fields to agree"); | 
 |   return new (DbgInfo->getAlloc()) | 
 |       SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect, | 
 |                  DL, O, IsVariadic); | 
 | } | 
 |  | 
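/// Transfer debug values from \p From to \p To, cloning each SDDbgValue on
/// From's node whose location operands refer to \p From so that they refer to
/// \p To instead. If \p SizeInBits is nonzero, the cloned expressions describe
/// the fragment of the variable starting at \p OffsetInBits. If
/// \p InvalidateDbg is set, the original debug values are invalidated and
/// will not be emitted.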
 | void SelectionDAG::transferDbgValues(SDValue From, SDValue To, | 
 |                                      unsigned OffsetInBits, unsigned SizeInBits, | 
 |                                      bool InvalidateDbg) { | 
 |   SDNode *FromNode = From.getNode(); | 
 |   SDNode *ToNode = To.getNode(); | 
 |   assert(FromNode && ToNode && "Can't modify dbg values"); | 
 |  | 
 |   // PR35338 | 
 |   // TODO: assert(From != To && "Redundant dbg value transfer"); | 
 |   // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer"); | 
 |   if (From == To || FromNode == ToNode) | 
 |     return; | 
 |  | 
 |   if (!FromNode->getHasDebugValue()) | 
 |     return; | 
 |  | 
 |   SDDbgOperand FromLocOp = | 
 |       SDDbgOperand::fromNode(From.getNode(), From.getResNo()); | 
 |   SDDbgOperand ToLocOp = SDDbgOperand::fromNode(To.getNode(), To.getResNo()); | 
 |  | 
 |   SmallVector<SDDbgValue *, 2> ClonedDVs; | 
 |   for (SDDbgValue *Dbg : GetDbgValues(FromNode)) { | 
 |     if (Dbg->isInvalidated()) | 
 |       continue; | 
 |  | 
 |     // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value"); | 
 |  | 
 |     // Create a new location ops vector that is equal to the old vector, but | 
 |     // with each instance of FromLocOp replaced with ToLocOp. | 
 |     bool Changed = false; | 
 |     auto NewLocOps = Dbg->copyLocationOps(); | 
 |     std::replace_if( | 
 |         NewLocOps.begin(), NewLocOps.end(), | 
 |         [&Changed, FromLocOp](const SDDbgOperand &Op) { | 
 |           bool Match = Op == FromLocOp; | 
 |           Changed |= Match; | 
 |           return Match; | 
 |         }, | 
 |         ToLocOp); | 
 |     // Ignore this SDDbgValue if we didn't find a matching location. | 
 |     if (!Changed) | 
 |       continue; | 
 |  | 
 |     DIVariable *Var = Dbg->getVariable(); | 
 |     auto *Expr = Dbg->getExpression(); | 
 |     // If a fragment is requested, update the expression. | 
 |     if (SizeInBits) { | 
 |       // When splitting a larger (e.g., sign-extended) value whose | 
 |       // lower bits are described with an SDDbgValue, do not attempt | 
 |       // to transfer the SDDbgValue to the upper bits. | 
 |       if (auto FI = Expr->getFragmentInfo()) | 
 |         if (OffsetInBits + SizeInBits > FI->SizeInBits) | 
 |           continue; | 
 |       auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits, | 
 |                                                              SizeInBits); | 
 |       if (!Fragment) | 
 |         continue; | 
 |       Expr = *Fragment; | 
 |     } | 
 |  | 
 |     auto AdditionalDependencies = Dbg->getAdditionalDependencies(); | 
 |     // Clone the SDDbgValue and move it to To. | 
 |     SDDbgValue *Clone = getDbgValueList( | 
 |         Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(), | 
 |         Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()), | 
 |         Dbg->isVariadic()); | 
 |     ClonedDVs.push_back(Clone); | 
 |  | 
 |     if (InvalidateDbg) { | 
 |       // Invalidate value and indicate the SDDbgValue should not be emitted. | 
 |       Dbg->setIsInvalidated(); | 
 |       Dbg->setIsEmitted(); | 
 |     } | 
 |   } | 
 |  | 
 |   for (SDDbgValue *Dbg : ClonedDVs) { | 
 |     assert(is_contained(Dbg->getSDNodes(), ToNode) && | 
 |            "Transferred DbgValues should depend on the new SDNode"); | 
 |     AddDbgValue(Dbg, false); | 
 |   } | 
 | } | 
 |  | 
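/// Salvage the debug values attached to \p N by rewriting them in terms of
/// N's operands, so that they survive N's deletion. Only ISD::ADD and
/// ISD::TRUNCATE are currently handled; other opcodes leave the debug values
/// untouched.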
 | void SelectionDAG::salvageDebugInfo(SDNode &N) { | 
 |   if (!N.getHasDebugValue()) | 
 |     return; | 
 |  | 
 |   SmallVector<SDDbgValue *, 2> ClonedDVs; | 
 |   for (auto *DV : GetDbgValues(&N)) { | 
 |     if (DV->isInvalidated()) | 
 |       continue; | 
 |     switch (N.getOpcode()) { | 
 |     default: | 
 |       break; | 
 |     case ISD::ADD: { | 
 |       SDValue N0 = N.getOperand(0); | 
 |       SDValue N1 = N.getOperand(1); | 
 |       if (!isa<ConstantSDNode>(N0)) { | 
 |         bool RHSConstant = isa<ConstantSDNode>(N1); | 
 |         uint64_t Offset; | 
 |         if (RHSConstant) | 
 |           Offset = N.getConstantOperandVal(1); | 
 |         // We are not allowed to turn indirect debug values variadic, so | 
 |         // don't salvage those. | 
 |         if (!RHSConstant && DV->isIndirect()) | 
 |           continue; | 
 |  | 
 |         // Rewrite an ADD constant node into a DIExpression. Since we are | 
 |         // performing arithmetic to compute the variable's *value* in the | 
 |         // DIExpression, we need to mark the expression with a | 
 |         // DW_OP_stack_value. | 
 |         auto *DIExpr = DV->getExpression(); | 
 |         auto NewLocOps = DV->copyLocationOps(); | 
 |         bool Changed = false; | 
 |         size_t OrigLocOpsSize = NewLocOps.size(); | 
 |         for (size_t i = 0; i < OrigLocOpsSize; ++i) { | 
 |           // We're not given a ResNo to compare against because the whole | 
 |           // node is going away. We know that any ISD::ADD only has one | 
 |           // result, so we can assume any node match is using the result. | 
 |           if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE || | 
 |               NewLocOps[i].getSDNode() != &N) | 
 |             continue; | 
 |           NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo()); | 
 |           if (RHSConstant) { | 
 |             SmallVector<uint64_t, 3> ExprOps; | 
 |             DIExpression::appendOffset(ExprOps, Offset); | 
 |             DIExpr = DIExpression::appendOpsToArg(DIExpr, ExprOps, i, true); | 
 |           } else { | 
 |             // Convert to a variadic expression (if not already). | 
 |             // convertToVariadicExpression() returns a const pointer, so we use | 
 |             // a temporary const variable here. | 
 |             const auto *TmpDIExpr = | 
 |                 DIExpression::convertToVariadicExpression(DIExpr); | 
 |             SmallVector<uint64_t, 3> ExprOps; | 
 |             ExprOps.push_back(dwarf::DW_OP_LLVM_arg); | 
 |             ExprOps.push_back(NewLocOps.size()); | 
 |             ExprOps.push_back(dwarf::DW_OP_plus); | 
 |             SDDbgOperand RHS = | 
 |                 SDDbgOperand::fromNode(N1.getNode(), N1.getResNo()); | 
 |             NewLocOps.push_back(RHS); | 
 |             DIExpr = DIExpression::appendOpsToArg(TmpDIExpr, ExprOps, i, true); | 
 |           } | 
 |           Changed = true; | 
 |         } | 
 |         (void)Changed; | 
 |         assert(Changed && "Salvage target doesn't use N"); | 
 |  | 
 |         bool IsVariadic = | 
 |             DV->isVariadic() || OrigLocOpsSize != NewLocOps.size(); | 
 |  | 
 |         auto AdditionalDependencies = DV->getAdditionalDependencies(); | 
 |         SDDbgValue *Clone = getDbgValueList( | 
 |             DV->getVariable(), DIExpr, NewLocOps, AdditionalDependencies, | 
 |             DV->isIndirect(), DV->getDebugLoc(), DV->getOrder(), IsVariadic); | 
 |         ClonedDVs.push_back(Clone); | 
 |         DV->setIsInvalidated(); | 
 |         DV->setIsEmitted(); | 
 |         LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; | 
 |                    N0.getNode()->dumprFull(this); | 
 |                    dbgs() << " into " << *DIExpr << '\n'); | 
 |       } | 
 |       break; | 
 |     } | 
 |     case ISD::TRUNCATE: { | 
 |       SDValue N0 = N.getOperand(0); | 
 |       TypeSize FromSize = N0.getValueSizeInBits(); | 
 |       TypeSize ToSize = N.getValueSizeInBits(0); | 
 |  | 
 |       DIExpression *DbgExpression = DV->getExpression(); | 
 |       auto ExtOps = DIExpression::getExtOps(FromSize, ToSize, false); | 
 |       auto NewLocOps = DV->copyLocationOps(); | 
 |       bool Changed = false; | 
 |       for (size_t i = 0; i < NewLocOps.size(); ++i) { | 
 |         if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE || | 
 |             NewLocOps[i].getSDNode() != &N) | 
 |           continue; | 
 |  | 
 |         NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo()); | 
 |         DbgExpression = DIExpression::appendOpsToArg(DbgExpression, ExtOps, i); | 
 |         Changed = true; | 
 |       } | 
 |       assert(Changed && "Salvage target doesn't use N"); | 
 |       (void)Changed; | 
 |  | 
 |       SDDbgValue *Clone = | 
 |           getDbgValueList(DV->getVariable(), DbgExpression, NewLocOps, | 
 |                           DV->getAdditionalDependencies(), DV->isIndirect(), | 
 |                           DV->getDebugLoc(), DV->getOrder(), DV->isVariadic()); | 
 |  | 
 |       ClonedDVs.push_back(Clone); | 
 |       DV->setIsInvalidated(); | 
 |       DV->setIsEmitted(); | 
 |       LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting"; N0.getNode()->dumprFull(this); | 
 |                  dbgs() << " into " << *DbgExpression << '\n'); | 
 |       break; | 
 |     } | 
 |     } | 
 |   } | 
 |  | 
 |   for (SDDbgValue *Dbg : ClonedDVs) { | 
 |     assert(!Dbg->getSDNodes().empty() && | 
 |            "Salvaged DbgValue should depend on a new SDNode"); | 
 |     AddDbgValue(Dbg, false); | 
 |   } | 
 | } | 
 |  | 
/// Creates an SDDbgLabel node.
 | SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label, | 
 |                                       const DebugLoc &DL, unsigned O) { | 
  assert(Label->isValidLocationForIntrinsic(DL) &&
 |          "Expected inlined-at fields to agree"); | 
 |   return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O); | 
 | } | 
 |  | 
 | namespace { | 
 |  | 
 | /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node | 
 | /// pointed to by a use iterator is deleted, increment the use iterator | 
 | /// so that it doesn't dangle. | 
 | /// | 
 | class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener { | 
 |   SDNode::use_iterator &UI; | 
 |   SDNode::use_iterator &UE; | 
 |  | 
 |   void NodeDeleted(SDNode *N, SDNode *E) override { | 
 |     // Increment the iterator as needed. | 
 |     while (UI != UE && N == *UI) | 
 |       ++UI; | 
 |   } | 
 |  | 
 | public: | 
 |   RAUWUpdateListener(SelectionDAG &d, | 
 |                      SDNode::use_iterator &ui, | 
 |                      SDNode::use_iterator &ue) | 
 |     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {} | 
 | }; | 
 |  | 
 | } // end anonymous namespace | 
 |  | 
 | /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. | 
 | /// This can cause recursive merging of nodes in the DAG. | 
 | /// | 
 | /// This version assumes From has a single result value. | 
 | /// | 
 | void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) { | 
 |   SDNode *From = FromN.getNode(); | 
 |   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 && | 
 |          "Cannot replace with this method!"); | 
  assert(From != To.getNode() && "Cannot replace uses of a value with itself");
 |  | 
 |   // Preserve Debug Values | 
 |   transferDbgValues(FromN, To); | 
 |   // Preserve extra info. | 
 |   copyExtraInfo(From, To.getNode()); | 
 |  | 
 |   // Iterate over all the existing uses of From. New uses will be added | 
 |   // to the beginning of the use list, which we avoid visiting. | 
 |   // This specifically avoids visiting uses of From that arise while the | 
 |   // replacement is happening, because any such uses would be the result | 
 |   // of CSE: If an existing node looks like From after one of its operands | 
  // is replaced by To, we don't want to replace all its users with To
 |   // too. See PR3018 for more info. | 
 |   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); | 
 |   RAUWUpdateListener Listener(*this, UI, UE); | 
 |   while (UI != UE) { | 
 |     SDNode *User = *UI; | 
 |  | 
 |     // This node is about to morph, remove its old self from the CSE maps. | 
 |     RemoveNodeFromCSEMaps(User); | 
 |  | 
 |     // A user can appear in a use list multiple times, and when this | 
 |     // happens the uses are usually next to each other in the list. | 
 |     // To help reduce the number of CSE recomputations, process all | 
 |     // the uses of this user that we can find this way. | 
 |     do { | 
 |       SDUse &Use = UI.getUse(); | 
 |       ++UI; | 
 |       Use.set(To); | 
 |       if (To->isDivergent() != From->isDivergent()) | 
 |         updateDivergence(User); | 
 |     } while (UI != UE && *UI == User); | 
 |     // Now that we have modified User, add it back to the CSE maps.  If it | 
 |     // already exists there, recursively merge the results together. | 
 |     AddModifiedNodeToCSEMaps(User); | 
 |   } | 
 |  | 
 |   // If we just RAUW'd the root, take note. | 
 |   if (FromN == getRoot()) | 
 |     setRoot(To); | 
 | } | 
 |  | 
 | /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. | 
 | /// This can cause recursive merging of nodes in the DAG. | 
 | /// | 
 | /// This version assumes that for each value of From, there is a | 
 | /// corresponding value in To in the same position with the same type. | 
 | /// | 
 | void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) { | 
 | #ifndef NDEBUG | 
 |   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) | 
 |     assert((!From->hasAnyUseOfValue(i) || | 
 |             From->getValueType(i) == To->getValueType(i)) && | 
 |            "Cannot use this version of ReplaceAllUsesWith!"); | 
 | #endif | 
 |  | 
 |   // Handle the trivial case. | 
 |   if (From == To) | 
 |     return; | 
 |  | 
 |   // Preserve Debug Info. Only do this if there's a use. | 
 |   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) | 
 |     if (From->hasAnyUseOfValue(i)) { | 
 |       assert((i < To->getNumValues()) && "Invalid To location"); | 
 |       transferDbgValues(SDValue(From, i), SDValue(To, i)); | 
 |     } | 
 |   // Preserve extra info. | 
 |   copyExtraInfo(From, To); | 
 |  | 
 |   // Iterate over just the existing users of From. See the comments in | 
 |   // the ReplaceAllUsesWith above. | 
 |   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); | 
 |   RAUWUpdateListener Listener(*this, UI, UE); | 
 |   while (UI != UE) { | 
 |     SDNode *User = *UI; | 
 |  | 
 |     // This node is about to morph, remove its old self from the CSE maps. | 
 |     RemoveNodeFromCSEMaps(User); | 
 |  | 
 |     // A user can appear in a use list multiple times, and when this | 
 |     // happens the uses are usually next to each other in the list. | 
 |     // To help reduce the number of CSE recomputations, process all | 
 |     // the uses of this user that we can find this way. | 
 |     do { | 
 |       SDUse &Use = UI.getUse(); | 
 |       ++UI; | 
 |       Use.setNode(To); | 
 |       if (To->isDivergent() != From->isDivergent()) | 
 |         updateDivergence(User); | 
 |     } while (UI != UE && *UI == User); | 
 |  | 
 |     // Now that we have modified User, add it back to the CSE maps.  If it | 
 |     // already exists there, recursively merge the results together. | 
 |     AddModifiedNodeToCSEMaps(User); | 
 |   } | 
 |  | 
 |   // If we just RAUW'd the root, take note. | 
 |   if (From == getRoot().getNode()) | 
 |     setRoot(SDValue(To, getRoot().getResNo())); | 
 | } | 
 |  | 
 | /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead. | 
 | /// This can cause recursive merging of nodes in the DAG. | 
 | /// | 
 | /// This version can replace From with any result values.  To must match the | 
 | /// number and types of values returned by From. | 
 | void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) { | 
 |   if (From->getNumValues() == 1)  // Handle the simple case efficiently. | 
 |     return ReplaceAllUsesWith(SDValue(From, 0), To[0]); | 
 |  | 
 |   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) { | 
 |     // Preserve Debug Info. | 
 |     transferDbgValues(SDValue(From, i), To[i]); | 
 |     // Preserve extra info. | 
 |     copyExtraInfo(From, To[i].getNode()); | 
 |   } | 
 |  | 
 |   // Iterate over just the existing users of From. See the comments in | 
 |   // the ReplaceAllUsesWith above. | 
 |   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end(); | 
 |   RAUWUpdateListener Listener(*this, UI, UE); | 
 |   while (UI != UE) { | 
 |     SDNode *User = *UI; | 
 |  | 
 |     // This node is about to morph, remove its old self from the CSE maps. | 
 |     RemoveNodeFromCSEMaps(User); | 
 |  | 
 |     // A user can appear in a use list multiple times, and when this happens the | 
 |     // uses are usually next to each other in the list.  To help reduce the | 
 |     // number of CSE and divergence recomputations, process all the uses of this | 
 |     // user that we can find this way. | 
 |     bool To_IsDivergent = false; | 
 |     do { | 
 |       SDUse &Use = UI.getUse(); | 
 |       const SDValue &ToOp = To[Use.getResNo()]; | 
 |       ++UI; | 
 |       Use.set(ToOp); | 
 |       To_IsDivergent |= ToOp->isDivergent(); | 
 |     } while (UI != UE && *UI == User); | 
 |  | 
 |     if (To_IsDivergent != From->isDivergent()) | 
 |       updateDivergence(User); | 
 |  | 
 |     // Now that we have modified User, add it back to the CSE maps.  If it | 
 |     // already exists there, recursively merge the results together. | 
 |     AddModifiedNodeToCSEMaps(User); | 
 |   } | 
 |  | 
 |   // If we just RAUW'd the root, take note. | 
 |   if (From == getRoot().getNode()) | 
    setRoot(To[getRoot().getResNo()]);
 | } | 
 |  | 
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
 |   // Handle the really simple, really trivial case efficiently. | 
 |   if (From == To) return; | 
 |  | 
  // Handle the simple, trivial case efficiently.
 |   if (From.getNode()->getNumValues() == 1) { | 
 |     ReplaceAllUsesWith(From, To); | 
 |     return; | 
 |   } | 
 |  | 
 |   // Preserve Debug Info. | 
 |   transferDbgValues(From, To); | 
 |   copyExtraInfo(From.getNode(), To.getNode()); | 
 |  | 
 |   // Iterate over just the existing users of From. See the comments in | 
 |   // the ReplaceAllUsesWith above. | 
 |   SDNode::use_iterator UI = From.getNode()->use_begin(), | 
 |                        UE = From.getNode()->use_end(); | 
 |   RAUWUpdateListener Listener(*this, UI, UE); | 
 |   while (UI != UE) { | 
 |     SDNode *User = *UI; | 
 |     bool UserRemovedFromCSEMaps = false; | 
 |  | 
 |     // A user can appear in a use list multiple times, and when this | 
 |     // happens the uses are usually next to each other in the list. | 
 |     // To help reduce the number of CSE recomputations, process all | 
 |     // the uses of this user that we can find this way. | 
 |     do { | 
 |       SDUse &Use = UI.getUse(); | 
 |  | 
 |       // Skip uses of different values from the same node. | 
 |       if (Use.getResNo() != From.getResNo()) { | 
 |         ++UI; | 
 |         continue; | 
 |       } | 
 |  | 
 |       // If this node hasn't been modified yet, it's still in the CSE maps, | 
 |       // so remove its old self from the CSE maps. | 
 |       if (!UserRemovedFromCSEMaps) { | 
 |         RemoveNodeFromCSEMaps(User); | 
 |         UserRemovedFromCSEMaps = true; | 
 |       } | 
 |  | 
 |       ++UI; | 
 |       Use.set(To); | 
 |       if (To->isDivergent() != From->isDivergent()) | 
 |         updateDivergence(User); | 
 |     } while (UI != UE && *UI == User); | 
 |     // We are iterating over all uses of the From node, so if a use | 
 |     // doesn't use the specific value, no changes are made. | 
 |     if (!UserRemovedFromCSEMaps) | 
 |       continue; | 
 |  | 
 |     // Now that we have modified User, add it back to the CSE maps.  If it | 
 |     // already exists there, recursively merge the results together. | 
 |     AddModifiedNodeToCSEMaps(User); | 
 |   } | 
 |  | 
 |   // If we just RAUW'd the root, take note. | 
 |   if (From == getRoot()) | 
 |     setRoot(To); | 
 | } | 
 |  | 
 | namespace { | 
 |  | 
 | /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith | 
 | /// to record information about a use. | 
 | struct UseMemo { | 
 |   SDNode *User; | 
 |   unsigned Index; | 
 |   SDUse *Use; | 
 | }; | 
 |  | 
 | /// operator< - Sort Memos by User. | 
 | bool operator<(const UseMemo &L, const UseMemo &R) { | 
 |   return (intptr_t)L.User < (intptr_t)R.User; | 
 | } | 
 |  | 
 | /// RAUOVWUpdateListener - Helper for ReplaceAllUsesOfValuesWith - When the node | 
 | /// pointed to by a UseMemo is deleted, set the User to nullptr to indicate that | 
 | /// the node already has been taken care of recursively. | 
 | class RAUOVWUpdateListener : public SelectionDAG::DAGUpdateListener { | 
 |   SmallVector<UseMemo, 4> &Uses; | 
 |  | 
 |   void NodeDeleted(SDNode *N, SDNode *E) override { | 
 |     for (UseMemo &Memo : Uses) | 
 |       if (Memo.User == N) | 
 |         Memo.User = nullptr; | 
 |   } | 
 |  | 
 | public: | 
 |   RAUOVWUpdateListener(SelectionDAG &d, SmallVector<UseMemo, 4> &uses) | 
 |       : SelectionDAG::DAGUpdateListener(d), Uses(uses) {} | 
 | }; | 
 |  | 
 | } // end anonymous namespace | 
 |  | 
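/// Recompute the divergence of \p N from scratch: the node is divergent if
/// the target reports it as a source of divergence, or if any of its
/// non-chain operands is divergent, unless the target declares the node
/// always uniform.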
 | bool SelectionDAG::calculateDivergence(SDNode *N) { | 
 |   if (TLI->isSDNodeAlwaysUniform(N)) { | 
 |     assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, UA) && | 
 |            "Conflicting divergence information!"); | 
 |     return false; | 
 |   } | 
 |   if (TLI->isSDNodeSourceOfDivergence(N, FLI, UA)) | 
 |     return true; | 
 |   for (const auto &Op : N->ops()) { | 
 |     if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent()) | 
 |       return true; | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
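/// Recompute the divergence bit of \p N and, whenever a node's bit actually
/// changes, propagate the recomputation to all of its users via a worklist.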
 | void SelectionDAG::updateDivergence(SDNode *N) { | 
 |   SmallVector<SDNode *, 16> Worklist(1, N); | 
 |   do { | 
 |     N = Worklist.pop_back_val(); | 
 |     bool IsDivergent = calculateDivergence(N); | 
 |     if (N->SDNodeBits.IsDivergent != IsDivergent) { | 
 |       N->SDNodeBits.IsDivergent = IsDivergent; | 
 |       llvm::append_range(Worklist, N->uses()); | 
 |     } | 
 |   } while (!Worklist.empty()); | 
 | } | 
 |  | 
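/// Compute a topological order of the DAG using Kahn's algorithm: seed
/// \p Order with the operand-less nodes, then append each user once all of
/// its operands have been visited.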
 | void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) { | 
 |   DenseMap<SDNode *, unsigned> Degree; | 
 |   Order.reserve(AllNodes.size()); | 
 |   for (auto &N : allnodes()) { | 
 |     unsigned NOps = N.getNumOperands(); | 
 |     Degree[&N] = NOps; | 
 |     if (0 == NOps) | 
 |       Order.push_back(&N); | 
 |   } | 
 |   for (size_t I = 0; I != Order.size(); ++I) { | 
 |     SDNode *N = Order[I]; | 
 |     for (auto *U : N->uses()) { | 
 |       unsigned &UnsortedOps = Degree[U]; | 
 |       if (0 == --UnsortedOps) | 
 |         Order.push_back(U); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | #ifndef NDEBUG | 
 | void SelectionDAG::VerifyDAGDivergence() { | 
 |   std::vector<SDNode *> TopoOrder; | 
 |   CreateTopologicalOrder(TopoOrder); | 
 |   for (auto *N : TopoOrder) { | 
 |     assert(calculateDivergence(N) == N->isDivergent() && | 
 |            "Divergence bit inconsistency detected"); | 
 |   } | 
 | } | 
 | #endif | 
 |  | 
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone.  The same value
/// may appear in both the From and To list.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
 |   // Handle the simple, trivial case efficiently. | 
 |   if (Num == 1) | 
 |     return ReplaceAllUsesOfValueWith(*From, *To); | 
 |  | 
 |   transferDbgValues(*From, *To); | 
 |   copyExtraInfo(From->getNode(), To->getNode()); | 
 |  | 
 |   // Read up all the uses and make records of them. This helps | 
 |   // processing new uses that are introduced during the | 
 |   // replacement process. | 
 |   SmallVector<UseMemo, 4> Uses; | 
 |   for (unsigned i = 0; i != Num; ++i) { | 
 |     unsigned FromResNo = From[i].getResNo(); | 
 |     SDNode *FromNode = From[i].getNode(); | 
 |     for (SDNode::use_iterator UI = FromNode->use_begin(), | 
 |          E = FromNode->use_end(); UI != E; ++UI) { | 
 |       SDUse &Use = UI.getUse(); | 
 |       if (Use.getResNo() == FromResNo) { | 
 |         UseMemo Memo = { *UI, i, &Use }; | 
 |         Uses.push_back(Memo); | 
 |       } | 
 |     } | 
 |   } | 
 |  | 
 |   // Sort the uses, so that all the uses from a given User are together. | 
 |   llvm::sort(Uses); | 
 |   RAUOVWUpdateListener Listener(*this, Uses); | 
 |  | 
 |   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size(); | 
 |        UseIndex != UseIndexEnd; ) { | 
 |     // We know that this user uses some value of From.  If it is the right | 
 |     // value, update it. | 
 |     SDNode *User = Uses[UseIndex].User; | 
 |     // If the node has been deleted by recursive CSE updates when updating | 
 |     // another node, then just skip this entry. | 
 |     if (User == nullptr) { | 
 |       ++UseIndex; | 
 |       continue; | 
 |     } | 
 |  | 
 |     // This node is about to morph, remove its old self from the CSE maps. | 
 |     RemoveNodeFromCSEMaps(User); | 
 |  | 
 |     // The Uses array is sorted, so all the uses for a given User | 
 |     // are next to each other in the list. | 
 |     // To help reduce the number of CSE recomputations, process all | 
 |     // the uses of this user that we can find this way. | 
 |     do { | 
 |       unsigned i = Uses[UseIndex].Index; | 
 |       SDUse &Use = *Uses[UseIndex].Use; | 
 |       ++UseIndex; | 
 |  | 
 |       Use.set(To[i]); | 
 |     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User); | 
 |  | 
 |     // Now that we have modified User, add it back to the CSE maps.  If it | 
 |     // already exists there, recursively merge the results together. | 
 |     AddModifiedNodeToCSEMaps(User); | 
 |   } | 
 | } | 
 |  | 
/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the maximum id.
 | unsigned SelectionDAG::AssignTopologicalOrder() { | 
 |   unsigned DAGSize = 0; | 
 |  | 
 |   // SortedPos tracks the progress of the algorithm. Nodes before it are | 
 |   // sorted, nodes after it are unsorted. When the algorithm completes | 
 |   // it is at the end of the list. | 
 |   allnodes_iterator SortedPos = allnodes_begin(); | 
 |  | 
 |   // Visit all the nodes. Move nodes with no operands to the front of | 
 |   // the list immediately. Annotate nodes that do have operands with their | 
 |   // operand count. Before we do this, the Node Id fields of the nodes | 
 |   // may contain arbitrary values. After, the Node Id fields for nodes | 
 |   // before SortedPos will contain the topological sort index, and the | 
  // Node Id fields for nodes at SortedPos and after will contain the
 |   // count of outstanding operands. | 
 |   for (SDNode &N : llvm::make_early_inc_range(allnodes())) { | 
 |     checkForCycles(&N, this); | 
 |     unsigned Degree = N.getNumOperands(); | 
 |     if (Degree == 0) { | 
      // A node with no operands, add it to the result array immediately.
 |       N.setNodeId(DAGSize++); | 
 |       allnodes_iterator Q(&N); | 
 |       if (Q != SortedPos) | 
 |         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q)); | 
 |       assert(SortedPos != AllNodes.end() && "Overran node list"); | 
 |       ++SortedPos; | 
 |     } else { | 
 |       // Temporarily use the Node Id as scratch space for the degree count. | 
 |       N.setNodeId(Degree); | 
 |     } | 
 |   } | 
 |  | 
 |   // Visit all the nodes. As we iterate, move nodes into sorted order, | 
 |   // such that by the time the end is reached all nodes will be sorted. | 
 |   for (SDNode &Node : allnodes()) { | 
 |     SDNode *N = &Node; | 
 |     checkForCycles(N, this); | 
 |     // N is in sorted position, so all its uses have one less operand | 
 |     // that needs to be sorted. | 
 |     for (SDNode *P : N->uses()) { | 
 |       unsigned Degree = P->getNodeId(); | 
 |       assert(Degree != 0 && "Invalid node degree"); | 
 |       --Degree; | 
 |       if (Degree == 0) { | 
        // All of P's operands are sorted, so P may be sorted now.
 |         P->setNodeId(DAGSize++); | 
 |         if (P->getIterator() != SortedPos) | 
 |           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P)); | 
 |         assert(SortedPos != AllNodes.end() && "Overran node list"); | 
 |         ++SortedPos; | 
 |       } else { | 
 |         // Update P's outstanding operand count. | 
 |         P->setNodeId(Degree); | 
 |       } | 
 |     } | 
 |     if (Node.getIterator() == SortedPos) { | 
 | #ifndef NDEBUG | 
 |       allnodes_iterator I(N); | 
 |       SDNode *S = &*++I; | 
 |       dbgs() << "Overran sorted position:\n"; | 
 |       S->dumprFull(this); dbgs() << "\n"; | 
 |       dbgs() << "Checking if this is due to cycles\n"; | 
 |       checkForCycles(this, true); | 
 | #endif | 
 |       llvm_unreachable(nullptr); | 
 |     } | 
 |   } | 
 |  | 
 |   assert(SortedPos == AllNodes.end() && | 
 |          "Topological sort incomplete!"); | 
 |   assert(AllNodes.front().getOpcode() == ISD::EntryToken && | 
 |          "First node in topological sort is not the entry token!"); | 
 |   assert(AllNodes.front().getNodeId() == 0 && | 
 |          "First node in topological sort has non-zero id!"); | 
 |   assert(AllNodes.front().getNumOperands() == 0 && | 
 |          "First node in topological sort has operands!"); | 
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
 |   assert(DAGSize == allnodes_size() && "Node count mismatch!"); | 
 |   return DAGSize; | 
 | } | 
 |  | 
 | /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the | 
 | /// value is produced by SD. | 
 | void SelectionDAG::AddDbgValue(SDDbgValue *DB, bool isParameter) { | 
 |   for (SDNode *SD : DB->getSDNodes()) { | 
 |     if (!SD) | 
 |       continue; | 
 |     assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue()); | 
 |     SD->setHasDebugValue(true); | 
 |   } | 
 |   DbgInfo->add(DB, isParameter); | 
 | } | 
 |  | 
 | void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); } | 
 |  | 
 | SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain, | 
 |                                                    SDValue NewMemOpChain) { | 
 |   assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node"); | 
 |   assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT"); | 
 |   // The new memory operation must have the same position as the old load in | 
 |   // terms of memory dependency. Create a TokenFactor for the old load and new | 
 |   // memory operation and update uses of the old load's output chain to use that | 
 |   // TokenFactor. | 
 |   if (OldChain == NewMemOpChain || OldChain.use_empty()) | 
 |     return NewMemOpChain; | 
 |  | 
 |   SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other, | 
 |                                 OldChain, NewMemOpChain); | 
 |   ReplaceAllUsesOfValueWith(OldChain, TokenFactor); | 
 |   UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain); | 
 |   return TokenFactor; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, | 
 |                                                    SDValue NewMemOp) { | 
 |   assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node"); | 
 |   SDValue OldChain = SDValue(OldLoad, 1); | 
 |   SDValue NewMemOpChain = NewMemOp.getValue(1); | 
 |   return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain); | 
 | } | 
 |  | 
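/// Given an ExternalSymbol node \p Op, look up the like-named function in the
/// current module and return a GlobalAddress node for it, reporting a fatal
/// error if no such function exists. If \p OutFunction is non-null, it
/// receives the Function that was found.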
 | SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op, | 
 |                                                      Function **OutFunction) { | 
 |   assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol"); | 
 |  | 
 |   auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol(); | 
 |   auto *Module = MF->getFunction().getParent(); | 
 |   auto *Function = Module->getFunction(Symbol); | 
 |  | 
  if (OutFunction != nullptr)
    *OutFunction = Function;
 |  | 
 |   if (Function != nullptr) { | 
    auto PtrTy =
        TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
 |     return getGlobalAddress(Function, SDLoc(Op), PtrTy); | 
 |   } | 
 |  | 
 |   std::string ErrorStr; | 
 |   raw_string_ostream ErrorFormatter(ErrorStr); | 
 |   ErrorFormatter << "Undefined external symbol "; | 
 |   ErrorFormatter << '"' << Symbol << '"'; | 
 |   report_fatal_error(Twine(ErrorFormatter.str())); | 
 | } | 
 |  | 
 | //===----------------------------------------------------------------------===// | 
 | //                              SDNode Class | 
 | //===----------------------------------------------------------------------===// | 
 |  | 
 | bool llvm::isNullConstant(SDValue V) { | 
 |   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); | 
 |   return Const != nullptr && Const->isZero(); | 
 | } | 
 |  | 
 | bool llvm::isNullFPConstant(SDValue V) { | 
 |   ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V); | 
 |   return Const != nullptr && Const->isZero() && !Const->isNegative(); | 
 | } | 
 |  | 
 | bool llvm::isAllOnesConstant(SDValue V) { | 
 |   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); | 
 |   return Const != nullptr && Const->isAllOnes(); | 
 | } | 
 |  | 
 | bool llvm::isOneConstant(SDValue V) { | 
 |   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); | 
 |   return Const != nullptr && Const->isOne(); | 
 | } | 
 |  | 
 | bool llvm::isMinSignedConstant(SDValue V) { | 
 |   ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V); | 
 |   return Const != nullptr && Const->isMinSignedValue(); | 
 | } | 
 |  | 
 | bool llvm::isNeutralConstant(unsigned Opcode, SDNodeFlags Flags, SDValue V, | 
 |                              unsigned OperandNo) { | 
 |   // NOTE: The cases should match with IR's ConstantExpr::getBinOpIdentity(). | 
 |   // TODO: Target-specific opcodes could be added. | 
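  // For example, "add x, 0", "mul x, 1" and "and x, -1" all leave x
  // unchanged, while 0 is only a neutral operand of "sub" in position 1
  // ("sub x, 0", not "sub 0, x").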
 |   if (auto *Const = isConstOrConstSplat(V)) { | 
 |     switch (Opcode) { | 
 |     case ISD::ADD: | 
 |     case ISD::OR: | 
 |     case ISD::XOR: | 
 |     case ISD::UMAX: | 
 |       return Const->isZero(); | 
 |     case ISD::MUL: | 
 |       return Const->isOne(); | 
 |     case ISD::AND: | 
 |     case ISD::UMIN: | 
 |       return Const->isAllOnes(); | 
 |     case ISD::SMAX: | 
 |       return Const->isMinSignedValue(); | 
 |     case ISD::SMIN: | 
 |       return Const->isMaxSignedValue(); | 
 |     case ISD::SUB: | 
 |     case ISD::SHL: | 
 |     case ISD::SRA: | 
 |     case ISD::SRL: | 
 |       return OperandNo == 1 && Const->isZero(); | 
 |     case ISD::UDIV: | 
 |     case ISD::SDIV: | 
 |       return OperandNo == 1 && Const->isOne(); | 
 |     } | 
 |   } else if (auto *ConstFP = isConstOrConstSplatFP(V)) { | 
 |     switch (Opcode) { | 
 |     case ISD::FADD: | 
 |       return ConstFP->isZero() && | 
 |              (Flags.hasNoSignedZeros() || ConstFP->isNegative()); | 
 |     case ISD::FSUB: | 
 |       return OperandNo == 1 && ConstFP->isZero() && | 
 |              (Flags.hasNoSignedZeros() || !ConstFP->isNegative()); | 
 |     case ISD::FMUL: | 
 |       return ConstFP->isExactlyValue(1.0); | 
 |     case ISD::FDIV: | 
 |       return OperandNo == 1 && ConstFP->isExactlyValue(1.0); | 
 |     case ISD::FMINNUM: | 
 |     case ISD::FMAXNUM: { | 
 |       // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF. | 
 |       EVT VT = V.getValueType(); | 
 |       const fltSemantics &Semantics = SelectionDAG::EVTToAPFloatSemantics(VT); | 
 |       APFloat NeutralAF = !Flags.hasNoNaNs() | 
 |                               ? APFloat::getQNaN(Semantics) | 
 |                               : !Flags.hasNoInfs() | 
 |                                     ? APFloat::getInf(Semantics) | 
 |                                     : APFloat::getLargest(Semantics); | 
 |       if (Opcode == ISD::FMAXNUM) | 
 |         NeutralAF.changeSign(); | 
 |  | 
 |       return ConstFP->isExactlyValue(NeutralAF); | 
 |     } | 
 |     } | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | SDValue llvm::peekThroughBitcasts(SDValue V) { | 
 |   while (V.getOpcode() == ISD::BITCAST) | 
 |     V = V.getOperand(0); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue llvm::peekThroughOneUseBitcasts(SDValue V) { | 
 |   while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse()) | 
 |     V = V.getOperand(0); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue llvm::peekThroughExtractSubvectors(SDValue V) { | 
 |   while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR) | 
 |     V = V.getOperand(0); | 
 |   return V; | 
 | } | 
 |  | 
 | SDValue llvm::peekThroughTruncates(SDValue V) { | 
 |   while (V.getOpcode() == ISD::TRUNCATE) | 
 |     V = V.getOperand(0); | 
 |   return V; | 
 | } | 
 |  | 
 | bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) { | 
 |   if (V.getOpcode() != ISD::XOR) | 
 |     return false; | 
 |   V = peekThroughBitcasts(V.getOperand(1)); | 
 |   unsigned NumBits = V.getScalarValueSizeInBits(); | 
 |   ConstantSDNode *C = | 
 |       isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true); | 
 |   return C && (C->getAPIntValue().countr_one() >= NumBits); | 
 | } | 
 |  | 
 | ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs, | 
 |                                           bool AllowTruncation) { | 
 |   EVT VT = N.getValueType(); | 
 |   APInt DemandedElts = VT.isFixedLengthVector() | 
 |                            ? APInt::getAllOnes(VT.getVectorMinNumElements()) | 
 |                            : APInt(1, 1); | 
 |   return isConstOrConstSplat(N, DemandedElts, AllowUndefs, AllowTruncation); | 
 | } | 
 |  | 
 | ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts, | 
 |                                           bool AllowUndefs, | 
 |                                           bool AllowTruncation) { | 
 |   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) | 
 |     return CN; | 
 |  | 
 |   // SplatVectors can truncate their operands. Ignore that case here unless | 
 |   // AllowTruncation is set. | 
 |   if (N->getOpcode() == ISD::SPLAT_VECTOR) { | 
 |     EVT VecEltVT = N->getValueType(0).getVectorElementType(); | 
 |     if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) { | 
 |       EVT CVT = CN->getValueType(0); | 
 |       assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension"); | 
 |       if (AllowTruncation || CVT == VecEltVT) | 
 |         return CN; | 
 |     } | 
 |   } | 
 |  | 
 |   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { | 
 |     BitVector UndefElements; | 
 |     ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements); | 
 |  | 
 |     // BuildVectors can truncate their operands. Ignore that case here unless | 
 |     // AllowTruncation is set. | 
 |     // TODO: Look into whether we should allow UndefElements in non-DemandedElts | 
 |     if (CN && (UndefElements.none() || AllowUndefs)) { | 
 |       EVT CVT = CN->getValueType(0); | 
 |       EVT NSVT = N.getValueType().getScalarType(); | 
 |       assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension"); | 
 |       if (AllowTruncation || (CVT == NSVT)) | 
 |         return CN; | 
 |     } | 
 |   } | 
 |  | 
 |   return nullptr; | 
 | } | 
 |  | 
 | ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) { | 
 |   EVT VT = N.getValueType(); | 
 |   APInt DemandedElts = VT.isFixedLengthVector() | 
 |                            ? APInt::getAllOnes(VT.getVectorMinNumElements()) | 
 |                            : APInt(1, 1); | 
 |   return isConstOrConstSplatFP(N, DemandedElts, AllowUndefs); | 
 | } | 
 |  | 
 | ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, | 
 |                                               const APInt &DemandedElts, | 
 |                                               bool AllowUndefs) { | 
 |   if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N)) | 
 |     return CN; | 
 |  | 
 |   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) { | 
 |     BitVector UndefElements; | 
 |     ConstantFPSDNode *CN = | 
 |         BV->getConstantFPSplatNode(DemandedElts, &UndefElements); | 
 |     // TODO: Look into whether we should allow UndefElements in non-DemandedElts | 
 |     if (CN && (UndefElements.none() || AllowUndefs)) | 
 |       return CN; | 
 |   } | 
 |  | 
 |   if (N.getOpcode() == ISD::SPLAT_VECTOR) | 
 |     if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0))) | 
 |       return CN; | 
 |  | 
 |   return nullptr; | 
 | } | 
 |  | 
 | bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) { | 
 |   // TODO: may want to use peekThroughBitcast() here. | 
 |   ConstantSDNode *C = | 
 |       isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true); | 
 |   return C && C->isZero(); | 
 | } | 
 |  | 
 | bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) { | 
 |   ConstantSDNode *C = | 
 |       isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation*/ true); | 
 |   return C && C->isOne(); | 
 | } | 
 |  | 
 | bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) { | 
 |   N = peekThroughBitcasts(N); | 
 |   unsigned BitWidth = N.getScalarValueSizeInBits(); | 
 |   ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs); | 
 |   return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth; | 
 | } | 
 |  | 
 | HandleSDNode::~HandleSDNode() { | 
 |   DropOperands(); | 
 | } | 
 |  | 
 | GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, | 
 |                                          const DebugLoc &DL, | 
 |                                          const GlobalValue *GA, EVT VT, | 
 |                                          int64_t o, unsigned TF) | 
 |     : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { | 
 |   TheGlobal = GA; | 
 | } | 
 |  | 
 | AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, | 
 |                                          EVT VT, unsigned SrcAS, | 
 |                                          unsigned DestAS) | 
 |     : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)), | 
 |       SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {} | 
 |  | 
 | MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, | 
 |                      SDVTList VTs, EVT memvt, MachineMemOperand *mmo) | 
 |     : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { | 
 |   MemSDNodeBits.IsVolatile = MMO->isVolatile(); | 
 |   MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal(); | 
 |   MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable(); | 
 |   MemSDNodeBits.IsInvariant = MMO->isInvariant(); | 
 |  | 
 |   // We check here that the size of the memory operand fits within the size of | 
 |   // the MMO. This is because the MMO might indicate only a possible address | 
 |   // range instead of specifying the affected memory addresses precisely. | 
 |   // TODO: Make MachineMemOperands aware of scalable vectors. | 
 |   assert(memvt.getStoreSize().getKnownMinValue() <= MMO->getSize() && | 
 |          "Size mismatch!"); | 
 | } | 
 |  | 
 | /// Profile - Gather unique data for the node. | 
 | /// | 
 | void SDNode::Profile(FoldingSetNodeID &ID) const { | 
 |   AddNodeIDNode(ID, this); | 
 | } | 
 |  | 
 | namespace { | 
 |  | 
 |   struct EVTArray { | 
 |     std::vector<EVT> VTs; | 
 |  | 
 |     EVTArray() { | 
 |       VTs.reserve(MVT::VALUETYPE_SIZE); | 
 |       for (unsigned i = 0; i < MVT::VALUETYPE_SIZE; ++i) | 
 |         VTs.push_back(MVT((MVT::SimpleValueType)i)); | 
 |     } | 
 |   }; | 
 |  | 
 | } // end anonymous namespace | 
 |  | 
 | /// getValueTypeList - Return a pointer to the specified value type. | 
 | /// | 
 | const EVT *SDNode::getValueTypeList(EVT VT) { | 
 |   static std::set<EVT, EVT::compareRawBits> EVTs; | 
 |   static EVTArray SimpleVTArray; | 
 |   static sys::SmartMutex<true> VTMutex; | 
 |  | 
 |   if (VT.isExtended()) { | 
 |     sys::SmartScopedLock<true> Lock(VTMutex); | 
 |     return &(*EVTs.insert(VT).first); | 
 |   } | 
 |   assert(VT.getSimpleVT() < MVT::VALUETYPE_SIZE && "Value type out of range!"); | 
 |   return &SimpleVTArray.VTs[VT.getSimpleVT().SimpleTy]; | 
 | } | 
 |  | 
 | /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the | 
 | /// indicated value.  This method ignores uses of other values defined by this | 
 | /// operation. | 
 | bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { | 
 |   assert(Value < getNumValues() && "Bad value!"); | 
 |  | 
 |   // TODO: Only iterate over uses of a given value of the node | 
 |   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { | 
 |     if (UI.getUse().getResNo() == Value) { | 
 |       if (NUses == 0) | 
 |         return false; | 
 |       --NUses; | 
 |     } | 
 |   } | 
 |  | 
 |   // Found exactly the right number of uses? | 
 |   return NUses == 0; | 
 | } | 
 |  | 
/// hasAnyUseOfValue - Return true if there is any use of the indicated
 | /// value. This method ignores uses of other values defined by this operation. | 
 | bool SDNode::hasAnyUseOfValue(unsigned Value) const { | 
 |   assert(Value < getNumValues() && "Bad value!"); | 
 |  | 
 |   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) | 
 |     if (UI.getUse().getResNo() == Value) | 
 |       return true; | 
 |  | 
 |   return false; | 
 | } | 
 |  | 
 | /// isOnlyUserOf - Return true if this node is the only use of N. | 
 | bool SDNode::isOnlyUserOf(const SDNode *N) const { | 
 |   bool Seen = false; | 
 |   for (const SDNode *User : N->uses()) { | 
 |     if (User == this) | 
 |       Seen = true; | 
 |     else | 
 |       return false; | 
 |   } | 
 |  | 
 |   return Seen; | 
 | } | 
 |  | 
 | /// Return true if the only users of N are contained in Nodes. | 
 | bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) { | 
 |   bool Seen = false; | 
 |   for (const SDNode *User : N->uses()) { | 
 |     if (llvm::is_contained(Nodes, User)) | 
 |       Seen = true; | 
 |     else | 
 |       return false; | 
 |   } | 
 |  | 
 |   return Seen; | 
 | } | 
 |  | 
/// isOperandOf - Return true if this node is an operand of N.
 | bool SDValue::isOperandOf(const SDNode *N) const { | 
 |   return is_contained(N->op_values(), *this); | 
 | } | 
 |  | 
 | bool SDNode::isOperandOf(const SDNode *N) const { | 
 |   return any_of(N->op_values(), | 
 |                 [this](SDValue Op) { return this == Op.getNode(); }); | 
 | } | 
 |  | 
 | /// reachesChainWithoutSideEffects - Return true if this operand (which must | 
 | /// be a chain) reaches the specified operand without crossing any | 
 | /// side-effecting instructions on any chain path.  In practice, this looks | 
/// through token factors and non-volatile loads.  In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
 | /// | 
 | /// Note that we only need to examine chains when we're searching for | 
 | /// side-effects; SelectionDAG requires that all side-effects are represented | 
 | /// by chains, even if another operand would force a specific ordering. This | 
 | /// constraint is necessary to allow transformations like splitting loads. | 
 | bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, | 
 |                                              unsigned Depth) const { | 
 |   if (*this == Dest) return true; | 
 |  | 
 |   // Don't search too deeply, we just want to be able to see through | 
 |   // TokenFactor's etc. | 
 |   if (Depth == 0) return false; | 
 |  | 
 |   // If this is a token factor, all inputs to the TF happen in parallel. | 
 |   if (getOpcode() == ISD::TokenFactor) { | 
 |     // First, try a shallow search. | 
 |     if (is_contained((*this)->ops(), Dest)) { | 
 |       // We found the chain we want as an operand of this TokenFactor. | 
 |       // Essentially, we reach the chain without side-effects if we could | 
 |       // serialize the TokenFactor into a simple chain of operations with | 
 |       // Dest as the last operation. This is automatically true if the | 
 |       // chain has one use: there are no other ordering constraints. | 
 |       // If the chain has more than one use, we give up: some other | 
 |       // use of Dest might force a side-effect between Dest and the current | 
 |       // node. | 
 |       if (Dest.hasOneUse()) | 
 |         return true; | 
 |     } | 
 |     // Next, try a deep search: check whether every operand of the TokenFactor | 
 |     // reaches Dest. | 
 |     return llvm::all_of((*this)->ops(), [=](SDValue Op) { | 
 |       return Op.reachesChainWithoutSideEffects(Dest, Depth - 1); | 
 |     }); | 
 |   } | 
 |  | 
 |   // Loads don't have side effects, look through them. | 
 |   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) { | 
 |     if (Ld->isUnordered()) | 
 |       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1); | 
 |   } | 
 |   return false; | 
 | } | 
 |  | 
 | bool SDNode::hasPredecessor(const SDNode *N) const { | 
 |   SmallPtrSet<const SDNode *, 32> Visited; | 
 |   SmallVector<const SDNode *, 16> Worklist; | 
 |   Worklist.push_back(this); | 
 |   return hasPredecessorHelper(N, Visited, Worklist); | 
 | } | 
 |  | 
 | void SDNode::intersectFlagsWith(const SDNodeFlags Flags) { | 
 |   this->Flags.intersectWith(Flags); | 
 | } | 
 |  | 
 | SDValue | 
 | SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp, | 
 |                                   ArrayRef<ISD::NodeType> CandidateBinOps, | 
 |                                   bool AllowPartials) { | 
 |   // The pattern must end in an extract from index 0. | 
 |   if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT || | 
 |       !isNullConstant(Extract->getOperand(1))) | 
 |     return SDValue(); | 
 |  | 
 |   // Match against one of the candidate binary ops. | 
 |   SDValue Op = Extract->getOperand(0); | 
 |   if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) { | 
 |         return Op.getOpcode() == unsigned(BinOp); | 
 |       })) | 
 |     return SDValue(); | 
 |  | 
 |   // Floating-point reductions may require relaxed constraints on the final step | 
 |   // of the reduction because they may reorder intermediate operations. | 
 |   unsigned CandidateBinOp = Op.getOpcode(); | 
 |   if (Op.getValueType().isFloatingPoint()) { | 
 |     SDNodeFlags Flags = Op->getFlags(); | 
 |     switch (CandidateBinOp) { | 
 |     case ISD::FADD: | 
 |       if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation()) | 
 |         return SDValue(); | 
 |       break; | 
 |     default: | 
 |       llvm_unreachable("Unhandled FP opcode for binop reduction"); | 
 |     } | 
 |   } | 
 |  | 
  // If matching fails partway through the stages below, see whether we already
  // did enough stages that a partial reduction from a subvector is possible.
 |   auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) { | 
 |     if (!AllowPartials || !Op) | 
 |       return SDValue(); | 
 |     EVT OpVT = Op.getValueType(); | 
 |     EVT OpSVT = OpVT.getScalarType(); | 
 |     EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts); | 
 |     if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0)) | 
 |       return SDValue(); | 
 |     BinOp = (ISD::NodeType)CandidateBinOp; | 
 |     return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op, | 
 |                    getVectorIdxConstant(0, SDLoc(Op))); | 
 |   }; | 
 |  | 
 |   // At each stage, we're looking for something that looks like: | 
 |   // %s = shufflevector <8 x i32> %op, <8 x i32> undef, | 
 |   //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, | 
 |   //                               i32 undef, i32 undef, i32 undef, i32 undef> | 
 |   // %a = binop <8 x i32> %op, %s | 
 |   // Where the mask changes according to the stage. E.g. for a 3-stage pyramid, | 
 |   // we expect something like: | 
 |   // <4,5,6,7,u,u,u,u> | 
 |   // <2,3,u,u,u,u,u,u> | 
 |   // <1,u,u,u,u,u,u,u> | 
 |   // While a partial reduction match would be: | 
 |   // <2,3,u,u,u,u,u,u> | 
 |   // <1,u,u,u,u,u,u,u> | 
 |   unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements()); | 
 |   SDValue PrevOp; | 
 |   for (unsigned i = 0; i < Stages; ++i) { | 
 |     unsigned MaskEnd = (1 << i); | 
 |  | 
 |     if (Op.getOpcode() != CandidateBinOp) | 
 |       return PartialReduction(PrevOp, MaskEnd); | 
 |  | 
 |     SDValue Op0 = Op.getOperand(0); | 
 |     SDValue Op1 = Op.getOperand(1); | 
 |  | 
 |     ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0); | 
 |     if (Shuffle) { | 
 |       Op = Op1; | 
 |     } else { | 
 |       Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1); | 
 |       Op = Op0; | 
 |     } | 
 |  | 
 |     // The first operand of the shuffle should be the same as the other operand | 
 |     // of the binop. | 
 |     if (!Shuffle || Shuffle->getOperand(0) != Op) | 
 |       return PartialReduction(PrevOp, MaskEnd); | 
 |  | 
 |     // Verify the shuffle has the expected (at this stage of the pyramid) mask. | 
 |     for (int Index = 0; Index < (int)MaskEnd; ++Index) | 
 |       if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index)) | 
 |         return PartialReduction(PrevOp, MaskEnd); | 
 |  | 
 |     PrevOp = Op; | 
 |   } | 
 |  | 
 |   // Handle subvector reductions, which tend to appear after the shuffle | 
 |   // reduction stages. | 
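  // E.g. with <8 x i32> %src:
  //   %lo = extract_subvector %src, 0
  //   %hi = extract_subvector %src, 4
  //   %op = binop <4 x i32> %lo, %hi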
 |   while (Op.getOpcode() == CandidateBinOp) { | 
 |     unsigned NumElts = Op.getValueType().getVectorNumElements(); | 
 |     SDValue Op0 = Op.getOperand(0); | 
 |     SDValue Op1 = Op.getOperand(1); | 
 |     if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR || | 
 |         Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR || | 
 |         Op0.getOperand(0) != Op1.getOperand(0)) | 
 |       break; | 
 |     SDValue Src = Op0.getOperand(0); | 
 |     unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); | 
 |     if (NumSrcElts != (2 * NumElts)) | 
 |       break; | 
 |     if (!(Op0.getConstantOperandAPInt(1) == 0 && | 
 |           Op1.getConstantOperandAPInt(1) == NumElts) && | 
 |         !(Op1.getConstantOperandAPInt(1) == 0 && | 
 |           Op0.getConstantOperandAPInt(1) == NumElts)) | 
 |       break; | 
 |     Op = Src; | 
 |   } | 
 |  | 
 |   BinOp = (ISD::NodeType)CandidateBinOp; | 
 |   return Op; | 
 | } | 
 |  | 
 | SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) { | 
 |   EVT VT = N->getValueType(0); | 
 |   EVT EltVT = VT.getVectorElementType(); | 
 |   unsigned NE = VT.getVectorNumElements(); | 
 |  | 
 |   SDLoc dl(N); | 
 |  | 
 |   // If ResNE is 0, fully unroll the vector op. | 
 |   if (ResNE == 0) | 
 |     ResNE = NE; | 
 |   else if (NE > ResNE) | 
 |     NE = ResNE; | 
 |  | 
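  // Some opcodes produce a second vector result (e.g. ISD::FFREXP's exponent
  // vector); unroll both result vectors in lockstep.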
 |   if (N->getNumValues() == 2) { | 
 |     SmallVector<SDValue, 8> Scalars0, Scalars1; | 
 |     SmallVector<SDValue, 4> Operands(N->getNumOperands()); | 
 |     EVT VT1 = N->getValueType(1); | 
 |     EVT EltVT1 = VT1.getVectorElementType(); | 
 |  | 
 |     unsigned i; | 
 |     for (i = 0; i != NE; ++i) { | 
 |       for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { | 
 |         SDValue Operand = N->getOperand(j); | 
 |         EVT OperandVT = Operand.getValueType(); | 
 |  | 
 |         // A vector operand; extract a single element. | 
 |         EVT OperandEltVT = OperandVT.getVectorElementType(); | 
 |         Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, | 
 |                               Operand, getVectorIdxConstant(i, dl)); | 
 |       } | 
 |  | 
 |       SDValue EltOp = getNode(N->getOpcode(), dl, {EltVT, EltVT1}, Operands); | 
 |       Scalars0.push_back(EltOp); | 
 |       Scalars1.push_back(EltOp.getValue(1)); | 
 |     } | 
 |  | 
 |     SDValue Vec0 = getBuildVector(VT, dl, Scalars0); | 
 |     SDValue Vec1 = getBuildVector(VT1, dl, Scalars1); | 
 |     return getMergeValues({Vec0, Vec1}, dl); | 
 |   } | 
 |  | 
 |   assert(N->getNumValues() == 1 && | 
 |          "Can't unroll a vector with multiple results!"); | 
 |  | 
 |   SmallVector<SDValue, 8> Scalars; | 
 |   SmallVector<SDValue, 4> Operands(N->getNumOperands()); | 
 |  | 
 |   unsigned i; | 
  for (i = 0; i != NE; ++i) {
 |     for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) { | 
 |       SDValue Operand = N->getOperand(j); | 
 |       EVT OperandVT = Operand.getValueType(); | 
 |       if (OperandVT.isVector()) { | 
 |         // A vector operand; extract a single element. | 
 |         EVT OperandEltVT = OperandVT.getVectorElementType(); | 
 |         Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, | 
 |                               Operand, getVectorIdxConstant(i, dl)); | 
 |       } else { | 
 |         // A scalar operand; just use it as is. | 
 |         Operands[j] = Operand; | 
 |       } | 
 |     } | 
 |  | 
 |     switch (N->getOpcode()) { | 
 |     default: { | 
 |       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands, | 
 |                                 N->getFlags())); | 
 |       break; | 
 |     } | 
 |     case ISD::VSELECT: | 
 |       Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands)); | 
 |       break; | 
 |     case ISD::SHL: | 
 |     case ISD::SRA: | 
 |     case ISD::SRL: | 
 |     case ISD::ROTL: | 
 |     case ISD::ROTR: | 
 |       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], | 
 |                                getShiftAmountOperand(Operands[0].getValueType(), | 
 |                                                      Operands[1]))); | 
 |       break; | 
 |     case ISD::SIGN_EXTEND_INREG: { | 
 |       EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); | 
 |       Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, | 
 |                                 Operands[0], | 
 |                                 getValueType(ExtVT))); | 
 |     } | 
 |     } | 
 |   } | 
 |  | 
 |   for (; i < ResNE; ++i) | 
 |     Scalars.push_back(getUNDEF(EltVT)); | 
 |  | 
 |   EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE); | 
 |   return getBuildVector(VecVT, dl, Scalars); | 
 | } | 
 |  | 
 | std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp( | 
 |     SDNode *N, unsigned ResNE) { | 
 |   unsigned Opcode = N->getOpcode(); | 
 |   assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO || | 
 |           Opcode == ISD::USUBO || Opcode == ISD::SSUBO || | 
 |           Opcode == ISD::UMULO || Opcode == ISD::SMULO) && | 
 |          "Expected an overflow opcode"); | 
 |  | 
 |   EVT ResVT = N->getValueType(0); | 
 |   EVT OvVT = N->getValueType(1); | 
 |   EVT ResEltVT = ResVT.getVectorElementType(); | 
 |   EVT OvEltVT = OvVT.getVectorElementType(); | 
 |   SDLoc dl(N); | 
 |  | 
 |   // If ResNE is 0, fully unroll the vector op. | 
 |   unsigned NE = ResVT.getVectorNumElements(); | 
 |   if (ResNE == 0) | 
 |     ResNE = NE; | 
 |   else if (NE > ResNE) | 
 |     NE = ResNE; | 
 |  | 
 |   SmallVector<SDValue, 8> LHSScalars; | 
 |   SmallVector<SDValue, 8> RHSScalars; | 
 |   ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE); | 
 |   ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE); | 
 |  | 
 |   EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT); | 
 |   SDVTList VTs = getVTList(ResEltVT, SVT); | 
 |   SmallVector<SDValue, 8> ResScalars; | 
 |   SmallVector<SDValue, 8> OvScalars; | 
 |   for (unsigned i = 0; i < NE; ++i) { | 
 |     SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]); | 
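    // Normalize the setcc-typed overflow bit to the boolean contents the
    // target expects for the result vector type (e.g. all-ones when vector
    // booleans are sign-extended).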
 |     SDValue Ov = | 
 |         getSelect(dl, OvEltVT, Res.getValue(1), | 
 |                   getBoolConstant(true, dl, OvEltVT, ResVT), | 
 |                   getConstant(0, dl, OvEltVT)); | 
 |  | 
 |     ResScalars.push_back(Res); | 
 |     OvScalars.push_back(Ov); | 
 |   } | 
 |  | 
 |   ResScalars.append(ResNE - NE, getUNDEF(ResEltVT)); | 
 |   OvScalars.append(ResNE - NE, getUNDEF(OvEltVT)); | 
 |  | 
 |   EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE); | 
 |   EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE); | 
 |   return std::make_pair(getBuildVector(NewResVT, dl, ResScalars), | 
 |                         getBuildVector(NewOvVT, dl, OvScalars)); | 
 | } | 
 |  | 
 | bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD, | 
 |                                                   LoadSDNode *Base, | 
 |                                                   unsigned Bytes, | 
 |                                                   int Dist) const { | 
 |   if (LD->isVolatile() || Base->isVolatile()) | 
 |     return false; | 
 |   // TODO: probably too restrictive for atomics, revisit | 
 |   if (!LD->isSimple()) | 
 |     return false; | 
 |   if (LD->isIndexed() || Base->isIndexed()) | 
 |     return false; | 
 |   if (LD->getChain() != Base->getChain()) | 
 |     return false; | 
 |   EVT VT = LD->getMemoryVT(); | 
 |   if (VT.getSizeInBits() / 8 != Bytes) | 
 |     return false; | 
 |  | 
 |   auto BaseLocDecomp = BaseIndexOffset::match(Base, *this); | 
 |   auto LocDecomp = BaseIndexOffset::match(LD, *this); | 
 |  | 
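  // The loads are consecutive if LD starts exactly Dist * Bytes past Base,
  // e.g. Dist == 1 means LD loads the Bytes immediately after Base.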
 |   int64_t Offset = 0; | 
 |   if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset)) | 
 |     return (Dist * (int64_t)Bytes == Offset); | 
 |   return false; | 
 | } | 
 |  | 
/// InferPtrAlign - Infer alignment of a load / store address. Return
/// std::nullopt if it cannot be inferred.
 | MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const { | 
 |   // If this is a GlobalAddress + cst, return the alignment. | 
 |   const GlobalValue *GV = nullptr; | 
 |   int64_t GVOffset = 0; | 
 |   if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { | 
 |     unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType()); | 
 |     KnownBits Known(PtrWidth); | 
 |     llvm::computeKnownBits(GV, Known, getDataLayout()); | 
 |     unsigned AlignBits = Known.countMinTrailingZeros(); | 
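    // Each known-zero trailing bit doubles the provable alignment; the
    // alignment is capped at 2^31 bytes.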
 |     if (AlignBits) | 
 |       return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset); | 
 |   } | 
 |  | 
 |   // If this is a direct reference to a stack slot, use information about the | 
 |   // stack slot's alignment. | 
 |   int FrameIdx = INT_MIN; | 
 |   int64_t FrameOffset = 0; | 
 |   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) { | 
 |     FrameIdx = FI->getIndex(); | 
 |   } else if (isBaseWithConstantOffset(Ptr) && | 
 |              isa<FrameIndexSDNode>(Ptr.getOperand(0))) { | 
 |     // Handle FI+Cst | 
 |     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); | 
 |     FrameOffset = Ptr.getConstantOperandVal(1); | 
 |   } | 
 |  | 
 |   if (FrameIdx != INT_MIN) { | 
 |     const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo(); | 
 |     return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset); | 
 |   } | 
 |  | 
 |   return std::nullopt; | 
 | } | 
 |  | 
 | /// Split the scalar node with EXTRACT_ELEMENT using the provided | 
 | /// VTs and return the low/high part. | 
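/// For example, with LoVT == HiVT == MVT::i32 and an i64 input, Lo and Hi are
/// the low and high 32-bit halves (EXTRACT_ELEMENT indices 0 and 1).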
 | std::pair<SDValue, SDValue> SelectionDAG::SplitScalar(const SDValue &N, | 
 |                                                       const SDLoc &DL, | 
 |                                                       const EVT &LoVT, | 
 |                                                       const EVT &HiVT) { | 
 |   assert(!LoVT.isVector() && !HiVT.isVector() && !N.getValueType().isVector() && | 
 |          "Split node must be a scalar type"); | 
 |   SDValue Lo = | 
 |       getNode(ISD::EXTRACT_ELEMENT, DL, LoVT, N, getIntPtrConstant(0, DL)); | 
 |   SDValue Hi = | 
 |       getNode(ISD::EXTRACT_ELEMENT, DL, HiVT, N, getIntPtrConstant(1, DL)); | 
 |   return std::make_pair(Lo, Hi); | 
 | } | 
 |  | 
 | /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type | 
 | /// which is split (or expanded) into two not necessarily identical pieces. | 
 | std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const { | 
 |   // Currently all types are split in half. | 
 |   EVT LoVT, HiVT; | 
 |   if (!VT.isVector()) | 
 |     LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT); | 
 |   else | 
 |     LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext()); | 
 |  | 
 |   return std::make_pair(LoVT, HiVT); | 
 | } | 
 |  | 
 | /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a | 
 | /// type, dependent on an enveloping VT that has been split into two identical | 
 | /// pieces. Sets the HiIsEmpty flag when hi type has zero storage size. | 
 | std::pair<EVT, EVT> | 
 | SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT, | 
 |                                        bool *HiIsEmpty) const { | 
 |   EVT EltTp = VT.getVectorElementType(); | 
 |   // Examples: | 
 |   //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty) | 
 |   //   custom VL=9  with enveloping VL=8/8 yields 8/1 | 
 |   //   custom VL=10 with enveloping VL=8/8 yields 8/2 | 
 |   //   etc. | 
 |   ElementCount VTNumElts = VT.getVectorElementCount(); | 
 |   ElementCount EnvNumElts = EnvVT.getVectorElementCount(); | 
 |   assert(VTNumElts.isScalable() == EnvNumElts.isScalable() && | 
 |          "Mixing fixed width and scalable vectors when enveloping a type"); | 
 |   EVT LoVT, HiVT; | 
 |   if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) { | 
 |     LoVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts); | 
 |     HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts); | 
 |     *HiIsEmpty = false; | 
 |   } else { | 
    // Flag that the hi type has zero storage size, but return the split
    // envelope type (this would be easier if vector types with zero elements
    // were allowed).
 |     LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts); | 
 |     HiVT = EVT::getVectorVT(*getContext(), EltTp, EnvNumElts); | 
 |     *HiIsEmpty = true; | 
 |   } | 
 |   return std::make_pair(LoVT, HiVT); | 
 | } | 
 |  | 
 | /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the | 
 | /// low/high part. | 
 | std::pair<SDValue, SDValue> | 
 | SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT, | 
 |                           const EVT &HiVT) { | 
 |   assert(LoVT.isScalableVector() == HiVT.isScalableVector() && | 
 |          LoVT.isScalableVector() == N.getValueType().isScalableVector() && | 
 |          "Splitting vector with an invalid mixture of fixed and scalable " | 
 |          "vector types"); | 
 |   assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <= | 
 |              N.getValueType().getVectorMinNumElements() && | 
 |          "More vector elements requested than available!"); | 
 |   SDValue Lo, Hi; | 
 |   Lo = | 
 |       getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL)); | 
 |   // For scalable vectors it is safe to use LoVT.getVectorMinNumElements() | 
 |   // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales | 
 |   // IDX with the runtime scaling factor of the result vector type. For | 
 |   // fixed-width result vectors, that runtime scaling factor is 1. | 
 |   Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N, | 
 |                getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL)); | 
 |   return std::make_pair(Lo, Hi); | 
 | } | 
 |  | 
 | std::pair<SDValue, SDValue> SelectionDAG::SplitEVL(SDValue N, EVT VecVT, | 
 |                                                    const SDLoc &DL) { | 
 |   // Split the vector length parameter. | 
  // %evl -> umin(%evl, %halfnumelts) and usubsat(%evl, %halfnumelts).
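  // E.g. for VecVT = v8i32 and %evl = 5, Lo covers umin(5, 4) = 4 lanes and
  // Hi covers usubsat(5, 4) = 1 lane.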
 |   EVT VT = N.getValueType(); | 
 |   assert(VecVT.getVectorElementCount().isKnownEven() && | 
 |          "Expecting the mask to be an evenly-sized vector"); | 
 |   unsigned HalfMinNumElts = VecVT.getVectorMinNumElements() / 2; | 
 |   SDValue HalfNumElts = | 
 |       VecVT.isFixedLengthVector() | 
 |           ? getConstant(HalfMinNumElts, DL, VT) | 
 |           : getVScale(DL, VT, APInt(VT.getScalarSizeInBits(), HalfMinNumElts)); | 
 |   SDValue Lo = getNode(ISD::UMIN, DL, VT, N, HalfNumElts); | 
 |   SDValue Hi = getNode(ISD::USUBSAT, DL, VT, N, HalfNumElts); | 
 |   return std::make_pair(Lo, Hi); | 
 | } | 
 |  | 
 | /// Widen the vector up to the next power of two using INSERT_SUBVECTOR. | 
 | SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) { | 
 |   EVT VT = N.getValueType(); | 
 |   EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(), | 
 |                                 NextPowerOf2(VT.getVectorNumElements())); | 
 |   return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N, | 
 |                  getVectorIdxConstant(0, DL)); | 
 | } | 
 |  | 
 | void SelectionDAG::ExtractVectorElements(SDValue Op, | 
 |                                          SmallVectorImpl<SDValue> &Args, | 
 |                                          unsigned Start, unsigned Count, | 
 |                                          EVT EltVT) { | 
 |   EVT VT = Op.getValueType(); | 
 |   if (Count == 0) | 
 |     Count = VT.getVectorNumElements(); | 
 |   if (EltVT == EVT()) | 
 |     EltVT = VT.getVectorElementType(); | 
 |   SDLoc SL(Op); | 
 |   for (unsigned i = Start, e = Start + Count; i != e; ++i) { | 
 |     Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op, | 
 |                            getVectorIdxConstant(i, SL))); | 
 |   } | 
 | } | 
 |  | 
 | // getAddressSpace - Return the address space this GlobalAddress belongs to. | 
 | unsigned GlobalAddressSDNode::getAddressSpace() const { | 
 |   return getGlobal()->getType()->getAddressSpace(); | 
 | } | 
 |  | 
 | Type *ConstantPoolSDNode::getType() const { | 
 |   if (isMachineConstantPoolEntry()) | 
 |     return Val.MachineCPVal->getType(); | 
 |   return Val.ConstVal->getType(); | 
 | } | 
 |  | 
 | bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef, | 
 |                                         unsigned &SplatBitSize, | 
 |                                         bool &HasAnyUndefs, | 
 |                                         unsigned MinSplatBits, | 
 |                                         bool IsBigEndian) const { | 
 |   EVT VT = getValueType(0); | 
 |   assert(VT.isVector() && "Expected a vector type"); | 
 |   unsigned VecWidth = VT.getSizeInBits(); | 
 |   if (MinSplatBits > VecWidth) | 
 |     return false; | 
 |  | 
 |   // FIXME: The widths are based on this node's type, but build vectors can | 
 |   // truncate their operands. | 
 |   SplatValue = APInt(VecWidth, 0); | 
 |   SplatUndef = APInt(VecWidth, 0); | 
 |  | 
 |   // Get the bits. Bits with undefined values (when the corresponding element | 
 |   // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared | 
 |   // in SplatValue. If any of the values are not constant, give up and return | 
 |   // false. | 
 |   unsigned int NumOps = getNumOperands(); | 
 |   assert(NumOps > 0 && "isConstantSplat has 0-size build vector"); | 
 |   unsigned EltWidth = VT.getScalarSizeInBits(); | 
 |  | 
 |   for (unsigned j = 0; j < NumOps; ++j) { | 
 |     unsigned i = IsBigEndian ? NumOps - 1 - j : j; | 
 |     SDValue OpVal = getOperand(i); | 
 |     unsigned BitPos = j * EltWidth; | 
 |  | 
 |     if (OpVal.isUndef()) | 
 |       SplatUndef.setBits(BitPos, BitPos + EltWidth); | 
 |     else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal)) | 
 |       SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos); | 
 |     else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal)) | 
 |       SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos); | 
 |     else | 
 |       return false; | 
 |   } | 
 |  | 
 |   // The build_vector is all constants or undefs. Find the smallest element | 
 |   // size that splats the vector. | 
 |   HasAnyUndefs = (SplatUndef != 0); | 
 |  | 
 |   // FIXME: This does not work for vectors with elements less than 8 bits. | 
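  // E.g. <0xAA, 0xAA, 0xAA, 0xAA> (v4i8) starts as the 32-bit value
  // 0xAAAAAAAA and is halved down to a SplatBitSize of 8.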
 |   while (VecWidth > 8) { | 
 |     // If we can't split in half, stop here. | 
 |     if (VecWidth & 1) | 
 |       break; | 
 |  | 
 |     unsigned HalfSize = VecWidth / 2; | 
 |     APInt HighValue = SplatValue.extractBits(HalfSize, HalfSize); | 
 |     APInt LowValue = SplatValue.extractBits(HalfSize, 0); | 
 |     APInt HighUndef = SplatUndef.extractBits(HalfSize, HalfSize); | 
 |     APInt LowUndef = SplatUndef.extractBits(HalfSize, 0); | 
 |  | 
 |     // If the two halves do not match (ignoring undef bits), stop here. | 
 |     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) || | 
 |         MinSplatBits > HalfSize) | 
 |       break; | 
 |  | 
 |     SplatValue = HighValue | LowValue; | 
 |     SplatUndef = HighUndef & LowUndef; | 
 |  | 
 |     VecWidth = HalfSize; | 
 |   } | 
 |  | 
  // FIXME: The loop above only tries to split in halves. If the input vector
  // is, for example, <3 x i16>, it cannot detect a SplatBitSize of 16. It is
  // unclear whether that is a design flaw currently limiting optimizations;
  // presumably, when this helper was created, vectors were normally
  // power-of-2 sized.
 |  | 
 |   SplatBitSize = VecWidth; | 
 |   return true; | 
 | } | 
 |  | 
 | SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts, | 
 |                                          BitVector *UndefElements) const { | 
 |   unsigned NumOps = getNumOperands(); | 
 |   if (UndefElements) { | 
 |     UndefElements->clear(); | 
 |     UndefElements->resize(NumOps); | 
 |   } | 
 |   assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size"); | 
 |   if (!DemandedElts) | 
 |     return SDValue(); | 
 |   SDValue Splatted; | 
 |   for (unsigned i = 0; i != NumOps; ++i) { | 
 |     if (!DemandedElts[i]) | 
 |       continue; | 
 |     SDValue Op = getOperand(i); | 
 |     if (Op.isUndef()) { | 
 |       if (UndefElements) | 
 |         (*UndefElements)[i] = true; | 
 |     } else if (!Splatted) { | 
 |       Splatted = Op; | 
 |     } else if (Splatted != Op) { | 
 |       return SDValue(); | 
 |     } | 
 |   } | 
 |  | 
 |   if (!Splatted) { | 
 |     unsigned FirstDemandedIdx = DemandedElts.countr_zero(); | 
 |     assert(getOperand(FirstDemandedIdx).isUndef() && | 
 |            "Can only have a splat without a constant for all undefs."); | 
 |     return getOperand(FirstDemandedIdx); | 
 |   } | 
 |  | 
 |   return Splatted; | 
 | } | 
 |  | 
 | SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const { | 
 |   APInt DemandedElts = APInt::getAllOnes(getNumOperands()); | 
 |   return getSplatValue(DemandedElts, UndefElements); | 
 | } | 
 |  | 
 | bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts, | 
 |                                             SmallVectorImpl<SDValue> &Sequence, | 
 |                                             BitVector *UndefElements) const { | 
 |   unsigned NumOps = getNumOperands(); | 
 |   Sequence.clear(); | 
 |   if (UndefElements) { | 
 |     UndefElements->clear(); | 
 |     UndefElements->resize(NumOps); | 
 |   } | 
 |   assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size"); | 
 |   if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps)) | 
 |     return false; | 
 |  | 
 |   // Set the undefs even if we don't find a sequence (like getSplatValue). | 
 |   if (UndefElements) | 
 |     for (unsigned I = 0; I != NumOps; ++I) | 
 |       if (DemandedElts[I] && getOperand(I).isUndef()) | 
 |         (*UndefElements)[I] = true; | 
 |  | 
 |   // Iteratively widen the sequence length looking for repetitions. | 
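  // E.g. <a,b,a,b,a,b,a,b> fails at SeqLen == 1 but matches at SeqLen == 2
  // with Sequence = {a, b}.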
 |   for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) { | 
 |     Sequence.append(SeqLen, SDValue()); | 
 |     for (unsigned I = 0; I != NumOps; ++I) { | 
 |       if (!DemandedElts[I]) | 
 |         continue; | 
 |       SDValue &SeqOp = Sequence[I % SeqLen]; | 
 |       SDValue Op = getOperand(I); | 
 |       if (Op.isUndef()) { | 
 |         if (!SeqOp) | 
 |           SeqOp = Op; | 
 |         continue; | 
 |       } | 
 |       if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) { | 
 |         Sequence.clear(); | 
 |         break; | 
 |       } | 
 |       SeqOp = Op; | 
 |     } | 
 |     if (!Sequence.empty()) | 
 |       return true; | 
 |   } | 
 |  | 
 |   assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern"); | 
 |   return false; | 
 | } | 
 |  | 
 | bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence, | 
 |                                             BitVector *UndefElements) const { | 
 |   APInt DemandedElts = APInt::getAllOnes(getNumOperands()); | 
 |   return getRepeatedSequence(DemandedElts, Sequence, UndefElements); | 
 | } | 
 |  | 
 | ConstantSDNode * | 
 | BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts, | 
 |                                         BitVector *UndefElements) const { | 
 |   return dyn_cast_or_null<ConstantSDNode>( | 
 |       getSplatValue(DemandedElts, UndefElements)); | 
 | } | 
 |  | 
 | ConstantSDNode * | 
 | BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const { | 
 |   return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements)); | 
 | } | 
 |  | 
 | ConstantFPSDNode * | 
 | BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts, | 
 |                                           BitVector *UndefElements) const { | 
 |   return dyn_cast_or_null<ConstantFPSDNode>( | 
 |       getSplatValue(DemandedElts, UndefElements)); | 
 | } | 
 |  | 
 | ConstantFPSDNode * | 
 | BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const { | 
 |   return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements)); | 
 | } | 
 |  | 
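// Returns log2(C) if the splat constant C is an exact power of two when
// converted to a BitWidth-bit integer (e.g. a splat of 8.0 yields 3), or -1
// otherwise.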
 | int32_t | 
 | BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, | 
 |                                                    uint32_t BitWidth) const { | 
 |   if (ConstantFPSDNode *CN = | 
 |           dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) { | 
 |     bool IsExact; | 
 |     APSInt IntVal(BitWidth); | 
 |     const APFloat &APF = CN->getValueAPF(); | 
 |     if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) != | 
 |             APFloat::opOK || | 
 |         !IsExact) | 
 |       return -1; | 
 |  | 
 |     return IntVal.exactLogBase2(); | 
 |   } | 
 |   return -1; | 
 | } | 
 |  | 
 | bool BuildVectorSDNode::getConstantRawBits( | 
 |     bool IsLittleEndian, unsigned DstEltSizeInBits, | 
 |     SmallVectorImpl<APInt> &RawBitElements, BitVector &UndefElements) const { | 
 |   // Early-out if this contains anything but Undef/Constant/ConstantFP. | 
 |   if (!isConstant()) | 
 |     return false; | 
 |  | 
 |   unsigned NumSrcOps = getNumOperands(); | 
 |   unsigned SrcEltSizeInBits = getValueType(0).getScalarSizeInBits(); | 
 |   assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 && | 
 |          "Invalid bitcast scale"); | 
 |  | 
 |   // Extract raw src bits. | 
 |   SmallVector<APInt> SrcBitElements(NumSrcOps, | 
 |                                     APInt::getZero(SrcEltSizeInBits)); | 
  BitVector SrcUndefElements(NumSrcOps, false);
 |  | 
 |   for (unsigned I = 0; I != NumSrcOps; ++I) { | 
 |     SDValue Op = getOperand(I); | 
 |     if (Op.isUndef()) { | 
      SrcUndefElements.set(I);
 |       continue; | 
 |     } | 
 |     auto *CInt = dyn_cast<ConstantSDNode>(Op); | 
 |     auto *CFP = dyn_cast<ConstantFPSDNode>(Op); | 
 |     assert((CInt || CFP) && "Unknown constant"); | 
 |     SrcBitElements[I] = CInt ? CInt->getAPIntValue().trunc(SrcEltSizeInBits) | 
 |                              : CFP->getValueAPF().bitcastToAPInt(); | 
 |   } | 
 |  | 
 |   // Recast to dst width. | 
 |   recastRawBits(IsLittleEndian, DstEltSizeInBits, RawBitElements, | 
                SrcBitElements, UndefElements, SrcUndefElements);
 |   return true; | 
 | } | 
 |  | 
 | void BuildVectorSDNode::recastRawBits(bool IsLittleEndian, | 
 |                                       unsigned DstEltSizeInBits, | 
 |                                       SmallVectorImpl<APInt> &DstBitElements, | 
 |                                       ArrayRef<APInt> SrcBitElements, | 
 |                                       BitVector &DstUndefElements, | 
 |                                       const BitVector &SrcUndefElements) { | 
 |   unsigned NumSrcOps = SrcBitElements.size(); | 
 |   unsigned SrcEltSizeInBits = SrcBitElements[0].getBitWidth(); | 
 |   assert(((NumSrcOps * SrcEltSizeInBits) % DstEltSizeInBits) == 0 && | 
 |          "Invalid bitcast scale"); | 
 |   assert(NumSrcOps == SrcUndefElements.size() && | 
 |          "Vector size mismatch"); | 
 |  | 
 |   unsigned NumDstOps = (NumSrcOps * SrcEltSizeInBits) / DstEltSizeInBits; | 
 |   DstUndefElements.clear(); | 
 |   DstUndefElements.resize(NumDstOps, false); | 
 |   DstBitElements.assign(NumDstOps, APInt::getZero(DstEltSizeInBits)); | 
 |  | 
 |   // Concatenate src elements constant bits together into dst element. | 
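  // E.g. 4 x i8 -> 2 x i16 (little endian): Dst[0] = Src[1]:Src[0].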
 |   if (SrcEltSizeInBits <= DstEltSizeInBits) { | 
 |     unsigned Scale = DstEltSizeInBits / SrcEltSizeInBits; | 
 |     for (unsigned I = 0; I != NumDstOps; ++I) { | 
 |       DstUndefElements.set(I); | 
 |       APInt &DstBits = DstBitElements[I]; | 
 |       for (unsigned J = 0; J != Scale; ++J) { | 
 |         unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1)); | 
 |         if (SrcUndefElements[Idx]) | 
 |           continue; | 
 |         DstUndefElements.reset(I); | 
 |         const APInt &SrcBits = SrcBitElements[Idx]; | 
 |         assert(SrcBits.getBitWidth() == SrcEltSizeInBits && | 
 |                "Illegal constant bitwidths"); | 
 |         DstBits.insertBits(SrcBits, J * SrcEltSizeInBits); | 
 |       } | 
 |     } | 
 |     return; | 
 |   } | 
 |  | 
 |   // Split src element constant bits into dst elements. | 
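  // E.g. 1 x i32 -> 4 x i8 (little endian): Dst[0] receives the lowest byte.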
 |   unsigned Scale = SrcEltSizeInBits / DstEltSizeInBits; | 
 |   for (unsigned I = 0; I != NumSrcOps; ++I) { | 
 |     if (SrcUndefElements[I]) { | 
 |       DstUndefElements.set(I * Scale, (I + 1) * Scale); | 
 |       continue; | 
 |     } | 
 |     const APInt &SrcBits = SrcBitElements[I]; | 
 |     for (unsigned J = 0; J != Scale; ++J) { | 
 |       unsigned Idx = (I * Scale) + (IsLittleEndian ? J : (Scale - J - 1)); | 
 |       APInt &DstBits = DstBitElements[Idx]; | 
 |       DstBits = SrcBits.extractBits(DstEltSizeInBits, J * DstEltSizeInBits); | 
 |     } | 
 |   } | 
 | } | 
 |  | 
 | bool BuildVectorSDNode::isConstant() const { | 
 |   for (const SDValue &Op : op_values()) { | 
 |     unsigned Opc = Op.getOpcode(); | 
 |     if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP) | 
 |       return false; | 
 |   } | 
 |   return true; | 
 | } | 
 |  | 
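// Match a BUILD_VECTOR of constants forming an arithmetic sequence
// <Start, Start + Stride, Start + 2 * Stride, ...>, e.g. <1, 3, 5, 7> yields
// {Start = 1, Stride = 2}.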
 | std::optional<std::pair<APInt, APInt>> | 
 | BuildVectorSDNode::isConstantSequence() const { | 
 |   unsigned NumOps = getNumOperands(); | 
 |   if (NumOps < 2) | 
 |     return std::nullopt; | 
 |  | 
 |   if (!isa<ConstantSDNode>(getOperand(0)) || | 
 |       !isa<ConstantSDNode>(getOperand(1))) | 
 |     return std::nullopt; | 
 |  | 
 |   unsigned EltSize = getValueType(0).getScalarSizeInBits(); | 
 |   APInt Start = getConstantOperandAPInt(0).trunc(EltSize); | 
 |   APInt Stride = getConstantOperandAPInt(1).trunc(EltSize) - Start; | 
 |  | 
 |   if (Stride.isZero()) | 
 |     return std::nullopt; | 
 |  | 
 |   for (unsigned i = 2; i < NumOps; ++i) { | 
 |     if (!isa<ConstantSDNode>(getOperand(i))) | 
 |       return std::nullopt; | 
 |  | 
 |     APInt Val = getConstantOperandAPInt(i).trunc(EltSize); | 
 |     if (Val != (Start + (Stride * i))) | 
 |       return std::nullopt; | 
 |   } | 
 |  | 
 |   return std::make_pair(Start, Stride); | 
 | } | 
 |  | 
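// A mask is a splat mask if every defined element indexes the same input
// element, e.g. <1, u, 1, 1> splats element 1.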
 | bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) { | 
 |   // Find the first non-undef value in the shuffle mask. | 
 |   unsigned i, e; | 
 |   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i) | 
 |     /* search */; | 
 |  | 
 |   // If all elements are undefined, this shuffle can be considered a splat | 
 |   // (although it should eventually get simplified away completely). | 
 |   if (i == e) | 
 |     return true; | 
 |  | 
 |   // Make sure all remaining elements are either undef or the same as the first | 
 |   // non-undef value. | 
 |   for (int Idx = Mask[i]; i != e; ++i) | 
 |     if (Mask[i] >= 0 && Mask[i] != Idx) | 
 |       return false; | 
 |   return true; | 
 | } | 
 |  | 
 | // Returns the SDNode if it is a constant integer BuildVector | 
 | // or constant integer. | 
 | SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const { | 
 |   if (isa<ConstantSDNode>(N)) | 
 |     return N.getNode(); | 
 |   if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) | 
 |     return N.getNode(); | 
 |   // Treat a GlobalAddress supporting constant offset folding as a | 
 |   // constant integer. | 
 |   if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N)) | 
 |     if (GA->getOpcode() == ISD::GlobalAddress && | 
 |         TLI->isOffsetFoldingLegal(GA)) | 
 |       return GA; | 
 |   if ((N.getOpcode() == ISD::SPLAT_VECTOR) && | 
 |       isa<ConstantSDNode>(N.getOperand(0))) | 
 |     return N.getNode(); | 
 |   return nullptr; | 
 | } | 
 |  | 
 | // Returns the SDNode if it is a constant float BuildVector | 
 | // or constant float. | 
 | SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const { | 
 |   if (isa<ConstantFPSDNode>(N)) | 
 |     return N.getNode(); | 
 |  | 
 |   if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) | 
 |     return N.getNode(); | 
 |  | 
 |   if ((N.getOpcode() == ISD::SPLAT_VECTOR) && | 
 |       isa<ConstantFPSDNode>(N.getOperand(0))) | 
 |     return N.getNode(); | 
 |  | 
 |   return nullptr; | 
 | } | 
 |  | 
 | void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) { | 
 |   assert(!Node->OperandList && "Node already has operands"); | 
 |   assert(SDNode::getMaxNumOperands() >= Vals.size() && | 
 |          "too many operands to fit into SDNode"); | 
 |   SDUse *Ops = OperandRecycler.allocate( | 
 |       ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator); | 
 |  | 
 |   bool IsDivergent = false; | 
 |   for (unsigned I = 0; I != Vals.size(); ++I) { | 
 |     Ops[I].setUser(Node); | 
 |     Ops[I].setInitial(Vals[I]); | 
    // Skip Chain. It does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent |= Ops[I].getNode()->isDivergent();
 |   } | 
 |   Node->NumOperands = Vals.size(); | 
 |   Node->OperandList = Ops; | 
 |   if (!TLI->isSDNodeAlwaysUniform(Node)) { | 
 |     IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, UA); | 
 |     Node->SDNodeBits.IsDivergent = IsDivergent; | 
 |   } | 
 |   checkForCycles(Node); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getTokenFactor(const SDLoc &DL, | 
 |                                      SmallVectorImpl<SDValue> &Vals) { | 
 |   size_t Limit = SDNode::getMaxNumOperands(); | 
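  // Fold the trailing Limit values into a nested TokenFactor until the
  // remaining list fits into a single node's operand list.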
 |   while (Vals.size() > Limit) { | 
 |     unsigned SliceIdx = Vals.size() - Limit; | 
 |     auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit); | 
 |     SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs); | 
 |     Vals.erase(Vals.begin() + SliceIdx, Vals.end()); | 
 |     Vals.emplace_back(NewTF); | 
 |   } | 
 |   return getNode(ISD::TokenFactor, DL, MVT::Other, Vals); | 
 | } | 
 |  | 
 | SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL, | 
 |                                         EVT VT, SDNodeFlags Flags) { | 
 |   switch (Opcode) { | 
 |   default: | 
 |     return SDValue(); | 
 |   case ISD::ADD: | 
 |   case ISD::OR: | 
 |   case ISD::XOR: | 
 |   case ISD::UMAX: | 
 |     return getConstant(0, DL, VT); | 
 |   case ISD::MUL: | 
 |     return getConstant(1, DL, VT); | 
 |   case ISD::AND: | 
 |   case ISD::UMIN: | 
 |     return getAllOnesConstant(DL, VT); | 
 |   case ISD::SMAX: | 
 |     return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT); | 
 |   case ISD::SMIN: | 
 |     return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT); | 
 |   case ISD::FADD: | 
 |     return getConstantFP(-0.0, DL, VT); | 
 |   case ISD::FMUL: | 
 |     return getConstantFP(1.0, DL, VT); | 
 |   case ISD::FMINNUM: | 
 |   case ISD::FMAXNUM: { | 
 |     // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF. | 
 |     const fltSemantics &Semantics = EVTToAPFloatSemantics(VT); | 
 |     APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) : | 
 |                         !Flags.hasNoInfs() ? APFloat::getInf(Semantics) : | 
 |                         APFloat::getLargest(Semantics); | 
 |     if (Opcode == ISD::FMAXNUM) | 
 |       NeutralAF.changeSign(); | 
 |  | 
 |     return getConstantFP(NeutralAF, DL, VT); | 
 |   } | 
 |   case ISD::FMINIMUM: | 
 |   case ISD::FMAXIMUM: { | 
 |     // Neutral element for fminimum is Inf or FLT_MAX, depending on FMF. | 
 |     const fltSemantics &Semantics = EVTToAPFloatSemantics(VT); | 
 |     APFloat NeutralAF = !Flags.hasNoInfs() ? APFloat::getInf(Semantics) | 
 |                                            : APFloat::getLargest(Semantics); | 
 |     if (Opcode == ISD::FMAXIMUM) | 
 |       NeutralAF.changeSign(); | 
 |  | 
 |     return getConstantFP(NeutralAF, DL, VT); | 
 |   } | 
 |  | 
 |   } | 
 | } | 
 |  | 
 | /// Helper used to make a call to a library function that has one argument of | 
 | /// pointer type. | 
 | /// | 
 | /// Such functions include 'fegetmode', 'fesetenv' and some others, which are | 
 | /// used to get or set floating-point state. They have one argument of pointer | 
 | /// type, which points to the memory region containing bits of the | 
/// floating-point state. The value returned by such a function is ignored in
/// the created call.
 | /// | 
 | /// \param LibFunc Reference to library function (value of RTLIB::Libcall). | 
 | /// \param Ptr Pointer used to save/load state. | 
 | /// \param InChain Ingoing token chain. | 
 | /// \returns Outgoing chain token. | 
 | SDValue SelectionDAG::makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, | 
 |                                             SDValue InChain, | 
 |                                             const SDLoc &DLoc) { | 
 |   assert(InChain.getValueType() == MVT::Other && "Expected token chain"); | 
 |   TargetLowering::ArgListTy Args; | 
 |   TargetLowering::ArgListEntry Entry; | 
 |   Entry.Node = Ptr; | 
 |   Entry.Ty = Ptr.getValueType().getTypeForEVT(*getContext()); | 
 |   Args.push_back(Entry); | 
 |   RTLIB::Libcall LC = static_cast<RTLIB::Libcall>(LibFunc); | 
 |   SDValue Callee = getExternalSymbol(TLI->getLibcallName(LC), | 
 |                                      TLI->getPointerTy(getDataLayout())); | 
 |   TargetLowering::CallLoweringInfo CLI(*this); | 
 |   CLI.setDebugLoc(DLoc).setChain(InChain).setLibCallee( | 
 |       TLI->getLibcallCallingConv(LC), Type::getVoidTy(*getContext()), Callee, | 
 |       std::move(Args)); | 
 |   return TLI->LowerCallTo(CLI).second; | 
 | } | 
 |  | 
 | void SelectionDAG::copyExtraInfo(SDNode *From, SDNode *To) { | 
 |   assert(From && To && "Invalid SDNode; empty source SDValue?"); | 
 |   auto I = SDEI.find(From); | 
 |   if (I == SDEI.end()) | 
 |     return; | 
 |  | 
 |   // Use of operator[] on the DenseMap may cause an insertion, which invalidates | 
 |   // the iterator, hence the need to make a copy to prevent a use-after-free. | 
 |   NodeExtraInfo NEI = I->second; | 
 |   if (LLVM_LIKELY(!NEI.PCSections)) { | 
 |     // No deep copy required for the types of extra info set. | 
 |     // | 
 |     // FIXME: Investigate if other types of extra info also need deep copy. This | 
 |     // depends on the types of nodes they can be attached to: if some extra info | 
 |     // is only ever attached to nodes where a replacement To node is always the | 
 |     // node where later use and propagation of the extra info has the intended | 
 |     // semantics, no deep copy is required. | 
 |     SDEI[To] = std::move(NEI); | 
 |     return; | 
 |   } | 
 |  | 
 |   // We need to copy NodeExtraInfo to all _new_ nodes that are being introduced | 
 |   // through the replacement of From with To. Otherwise, replacements of a node | 
 |   // (From) with more complex nodes (To and its operands) may result in lost | 
 |   // extra info where the root node (To) is insignificant in further propagating | 
 |   // and using extra info when further lowering to MIR. | 
 |   // | 
 |   // In the first step pre-populate the visited set with the nodes reachable | 
 |   // from the old From node. This avoids copying NodeExtraInfo to parts of the | 
 |   // DAG that is not new and should be left untouched. | 
 |   SmallVector<const SDNode *> Leafs{From}; // Leafs reachable with VisitFrom. | 
 |   DenseSet<const SDNode *> FromReach; // The set of nodes reachable from From. | 
 |   auto VisitFrom = [&](auto &&Self, const SDNode *N, int MaxDepth) { | 
 |     if (MaxDepth == 0) { | 
 |       // Remember this node in case we need to increase MaxDepth and continue | 
 |       // populating FromReach from this node. | 
 |       Leafs.emplace_back(N); | 
 |       return; | 
 |     } | 
 |     if (!FromReach.insert(N).second) | 
 |       return; | 
 |     for (const SDValue &Op : N->op_values()) | 
 |       Self(Self, Op.getNode(), MaxDepth - 1); | 
 |   }; | 
 |  | 
 |   // Copy extra info to To and all its transitive operands (that are new). | 
 |   SmallPtrSet<const SDNode *, 8> Visited; | 
 |   auto DeepCopyTo = [&](auto &&Self, const SDNode *N) { | 
 |     if (FromReach.contains(N)) | 
 |       return true; | 
 |     if (!Visited.insert(N).second) | 
 |       return true; | 
 |     if (getEntryNode().getNode() == N) | 
 |       return false; | 
 |     for (const SDValue &Op : N->op_values()) { | 
 |       if (!Self(Self, Op.getNode())) | 
 |         return false; | 
 |     } | 
 |     // Copy only if entry node was not reached. | 
 |     SDEI[N] = NEI; | 
 |     return true; | 
 |   }; | 
 |  | 
 |   // We first try with a lower MaxDepth, assuming that the path to common | 
 |   // operands between From and To is relatively short. This significantly | 
 |   // improves performance in the common case. The initial MaxDepth is big | 
 |   // enough to avoid retry in the common case; the last MaxDepth is large | 
 |   // enough to avoid having to use the fallback below (and protects from | 
 |   // potential stack exhaustion from recursion). | 
 |   for (int PrevDepth = 0, MaxDepth = 16; MaxDepth <= 1024; | 
 |        PrevDepth = MaxDepth, MaxDepth *= 2, Visited.clear()) { | 
 |     // StartFrom is the previous (or initial) set of leafs reachable at the | 
 |     // previous maximum depth. | 
 |     SmallVector<const SDNode *> StartFrom; | 
 |     std::swap(StartFrom, Leafs); | 
 |     for (const SDNode *N : StartFrom) | 
 |       VisitFrom(VisitFrom, N, MaxDepth - PrevDepth); | 
 |     if (LLVM_LIKELY(DeepCopyTo(DeepCopyTo, To))) | 
 |       return; | 
 |     // This should happen very rarely (reached the entry node). | 
 |     LLVM_DEBUG(dbgs() << __func__ << ": MaxDepth=" << MaxDepth << " too low\n"); | 
 |     assert(!Leafs.empty()); | 
 |   } | 
 |  | 
  // This should not happen - but if it did, that means the subgraph reachable
  // from From has depth greater than or equal to the maximum MaxDepth, and
  // VisitFrom() could not visit all reachable common operands. Consequently,
  // we were able to reach the entry node.
 |   errs() << "warning: incomplete propagation of SelectionDAG::NodeExtraInfo\n"; | 
 |   assert(false && "From subgraph too complex - increase max. MaxDepth?"); | 
 |   // Best-effort fallback if assertions disabled. | 
 |   SDEI[To] = std::move(NEI); | 
 | } | 
 |  | 
 | #ifndef NDEBUG | 
 | static void checkForCyclesHelper(const SDNode *N, | 
 |                                  SmallPtrSetImpl<const SDNode*> &Visited, | 
 |                                  SmallPtrSetImpl<const SDNode*> &Checked, | 
 |                                  const llvm::SelectionDAG *DAG) { | 
 |   // If this node has already been checked, don't check it again. | 
 |   if (Checked.count(N)) | 
 |     return; | 
 |  | 
 |   // If a node has already been visited on this depth-first walk, reject it as | 
 |   // a cycle. | 
 |   if (!Visited.insert(N).second) { | 
 |     errs() << "Detected cycle in SelectionDAG\n"; | 
 |     dbgs() << "Offending node:\n"; | 
    N->dumprFull(DAG);
    dbgs() << "\n";
 |     abort(); | 
 |   } | 
 |  | 
 |   for (const SDValue &Op : N->op_values()) | 
 |     checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG); | 
 |  | 
 |   Checked.insert(N); | 
 |   Visited.erase(N); | 
 | } | 
 | #endif | 
 |  | 
 | void llvm::checkForCycles(const llvm::SDNode *N, | 
 |                           const llvm::SelectionDAG *DAG, | 
 |                           bool force) { | 
 | #ifndef NDEBUG | 
 |   bool check = force; | 
 | #ifdef EXPENSIVE_CHECKS | 
 |   check = true; | 
 | #endif  // EXPENSIVE_CHECKS | 
 |   if (check) { | 
 |     assert(N && "Checking nonexistent SDNode"); | 
 |     SmallPtrSet<const SDNode*, 32> visited; | 
 |     SmallPtrSet<const SDNode*, 32> checked; | 
 |     checkForCyclesHelper(N, visited, checked, DAG); | 
 |   } | 
 | #endif  // !NDEBUG | 
 | } | 
 |  | 
 | void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) { | 
 |   checkForCycles(DAG->getRoot().getNode(), DAG, force); | 
 | } |