| //===- DAGCombiner.cpp - Implement a DAG node combiner --------------------===// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| // |
| // This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run |
| // both before and after the DAG is legalized. |
| // |
| // This pass is not a substitute for the LLVM IR instcombine pass. This pass is |
| // primarily intended to handle simplification opportunities that are implicit |
| // in the LLVM IR and exposed by the various codegen lowering phases. |
| // |
| //===----------------------------------------------------------------------===// |
| |
| #include "llvm/ADT/APFloat.h" |
| #include "llvm/ADT/APInt.h" |
| #include "llvm/ADT/ArrayRef.h" |
| #include "llvm/ADT/DenseMap.h" |
| #include "llvm/ADT/IntervalMap.h" |
| #include "llvm/ADT/STLExtras.h" |
| #include "llvm/ADT/SetVector.h" |
| #include "llvm/ADT/SmallBitVector.h" |
| #include "llvm/ADT/SmallPtrSet.h" |
| #include "llvm/ADT/SmallSet.h" |
| #include "llvm/ADT/SmallVector.h" |
| #include "llvm/ADT/Statistic.h" |
| #include "llvm/Analysis/AliasAnalysis.h" |
| #include "llvm/Analysis/MemoryLocation.h" |
| #include "llvm/Analysis/TargetLibraryInfo.h" |
| #include "llvm/Analysis/ValueTracking.h" |
| #include "llvm/Analysis/VectorUtils.h" |
| #include "llvm/CodeGen/ByteProvider.h" |
| #include "llvm/CodeGen/DAGCombine.h" |
| #include "llvm/CodeGen/ISDOpcodes.h" |
| #include "llvm/CodeGen/MachineFunction.h" |
| #include "llvm/CodeGen/MachineMemOperand.h" |
| #include "llvm/CodeGen/SDPatternMatch.h" |
| #include "llvm/CodeGen/SelectionDAG.h" |
| #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h" |
| #include "llvm/CodeGen/SelectionDAGNodes.h" |
| #include "llvm/CodeGen/SelectionDAGTargetInfo.h" |
| #include "llvm/CodeGen/TargetLowering.h" |
| #include "llvm/CodeGen/TargetRegisterInfo.h" |
| #include "llvm/CodeGen/TargetSubtargetInfo.h" |
| #include "llvm/CodeGen/ValueTypes.h" |
| #include "llvm/CodeGenTypes/MachineValueType.h" |
| #include "llvm/IR/Attributes.h" |
| #include "llvm/IR/Constant.h" |
| #include "llvm/IR/DataLayout.h" |
| #include "llvm/IR/DerivedTypes.h" |
| #include "llvm/IR/Function.h" |
| #include "llvm/IR/Metadata.h" |
| #include "llvm/Support/Casting.h" |
| #include "llvm/Support/CodeGen.h" |
| #include "llvm/Support/CommandLine.h" |
| #include "llvm/Support/Compiler.h" |
| #include "llvm/Support/Debug.h" |
| #include "llvm/Support/DebugCounter.h" |
| #include "llvm/Support/ErrorHandling.h" |
| #include "llvm/Support/KnownBits.h" |
| #include "llvm/Support/MathExtras.h" |
| #include "llvm/Support/raw_ostream.h" |
| #include "llvm/Target/TargetMachine.h" |
| #include "llvm/Target/TargetOptions.h" |
| #include <algorithm> |
| #include <cassert> |
| #include <cstdint> |
| #include <functional> |
| #include <iterator> |
| #include <optional> |
| #include <string> |
| #include <tuple> |
| #include <utility> |
| #include <variant> |
| |
| #include "MatchContext.h" |
| |
| using namespace llvm; |
| using namespace llvm::SDPatternMatch; |
| |
| #define DEBUG_TYPE "dagcombine" |
| |
STATISTIC(NodesCombined, "Number of dag nodes combined");
STATISTIC(PreIndexedNodes, "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed, "Number of load/op/store narrowed");
STATISTIC(LdStFP2Int, "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads, "Number of loads sliced");
STATISTIC(NumFPLogicOpsConv, "Number of logic ops converted to fp ops");
| |
| DEBUG_COUNTER(DAGCombineCounter, "dagcombine", |
| "Controls whether a DAG combine is performed for a node"); |
| |
| static cl::opt<bool> |
| CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden, |
| cl::desc("Enable DAG combiner's use of IR alias analysis")); |
| |
| static cl::opt<bool> |
| UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true), |
| cl::desc("Enable DAG combiner's use of TBAA")); |
| |
| #ifndef NDEBUG |
| static cl::opt<std::string> |
| CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden, |
| cl::desc("Only use DAG-combiner alias analysis in this" |
| " function")); |
| #endif |
| |
| /// Hidden option to stress test load slicing, i.e., when this option |
| /// is enabled, load slicing bypasses most of its profitability guards. |
| static cl::opt<bool> |
| StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden, |
| cl::desc("Bypass the profitability model of load slicing"), |
| cl::init(false)); |
| |
| static cl::opt<bool> |
| MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true), |
| cl::desc("DAG combiner may split indexing from loads")); |
| |
| static cl::opt<bool> |
| EnableStoreMerging("combiner-store-merging", cl::Hidden, cl::init(true), |
| cl::desc("DAG combiner enable merging multiple stores " |
| "into a wider store")); |
| |
| static cl::opt<unsigned> TokenFactorInlineLimit( |
| "combiner-tokenfactor-inline-limit", cl::Hidden, cl::init(2048), |
| cl::desc("Limit the number of operands to inline for Token Factors")); |
| |
| static cl::opt<unsigned> StoreMergeDependenceLimit( |
| "combiner-store-merge-dependence-limit", cl::Hidden, cl::init(10), |
| cl::desc("Limit the number of times for the same StoreNode and RootNode " |
| "to bail out in store merging dependence check")); |
| |
| static cl::opt<bool> EnableReduceLoadOpStoreWidth( |
| "combiner-reduce-load-op-store-width", cl::Hidden, cl::init(true), |
| cl::desc("DAG combiner enable reducing the width of load/op/store " |
| "sequence")); |
| static cl::opt<bool> ReduceLoadOpStoreWidthForceNarrowingProfitable( |
| "combiner-reduce-load-op-store-width-force-narrowing-profitable", |
| cl::Hidden, cl::init(false), |
| cl::desc("DAG combiner force override the narrowing profitable check when " |
| "reducing the width of load/op/store sequences")); |
| |
| static cl::opt<bool> EnableShrinkLoadReplaceStoreWithStore( |
| "combiner-shrink-load-replace-store-with-store", cl::Hidden, cl::init(true), |
| cl::desc("DAG combiner enable load/<replace bytes>/store with " |
| "a narrower store")); |
| |
| static cl::opt<bool> DisableCombines("combiner-disabled", cl::Hidden, |
| cl::init(false), |
| cl::desc("Disable the DAG combiner")); |
| |
| namespace { |
| |
| class DAGCombiner { |
| SelectionDAG &DAG; |
| const TargetLowering &TLI; |
| const SelectionDAGTargetInfo *STI; |
| CombineLevel Level = BeforeLegalizeTypes; |
| CodeGenOptLevel OptLevel; |
| bool LegalDAG = false; |
| bool LegalOperations = false; |
| bool LegalTypes = false; |
| bool ForCodeSize; |
| bool DisableGenericCombines; |
| |
| /// Worklist of all of the nodes that need to be simplified. |
| /// |
| /// This must behave as a stack -- new nodes to process are pushed onto the |
| /// back and when processing we pop off of the back. |
| /// |
| /// The worklist will not contain duplicates but may contain null entries |
| /// due to nodes being deleted from the underlying DAG. For fast lookup and |
| /// deduplication, the index of the node in this vector is stored in the |
| /// node in SDNode::CombinerWorklistIndex. |
| SmallVector<SDNode *, 64> Worklist; |
| |
  /// This records all nodes attempted to be added to the worklist since we
  /// considered a new worklist entry. Since we do not add duplicate nodes
  /// to the worklist, this is different from the tail of the worklist.
| SmallSetVector<SDNode *, 32> PruningList; |
| |
  /// Map from candidate StoreNode to the pair of RootNode and count.
  /// The count tracks how many times we have seen the StoreNode with the
  /// same RootNode bail out of the dependence check. Once the count for a
  /// pair exceeds the limit, we stop considering the StoreNode with that
  /// RootNode as a store merging candidate.
| DenseMap<SDNode *, std::pair<SDNode *, unsigned>> StoreRootCountMap; |
| |
| // BatchAA - Used for DAG load/store alias analysis. |
| BatchAAResults *BatchAA; |
| |
| /// This caches all chains that have already been processed in |
| /// DAGCombiner::getStoreMergeCandidates() and found to have no mergeable |
| /// stores candidates. |
| SmallPtrSet<SDNode *, 4> ChainsWithoutMergeableStores; |
| |
| /// When an instruction is simplified, add all users of the instruction to |
| /// the work lists because they might get more simplified now. |
| void AddUsersToWorklist(SDNode *N) { |
| for (SDNode *Node : N->users()) |
| AddToWorklist(Node); |
| } |
| |
  /// Convenient shorthand to add a node and all of its users to the worklist.
| void AddToWorklistWithUsers(SDNode *N) { |
| AddUsersToWorklist(N); |
| AddToWorklist(N); |
| } |
| |
| // Prune potentially dangling nodes. This is called after |
| // any visit to a node, but should also be called during a visit after any |
| // failed combine which may have created a DAG node. |
| void clearAddedDanglingWorklistEntries() { |
| // Check any nodes added to the worklist to see if they are prunable. |
| while (!PruningList.empty()) { |
| auto *N = PruningList.pop_back_val(); |
| if (N->use_empty()) |
| recursivelyDeleteUnusedNodes(N); |
| } |
| } |
| |
| SDNode *getNextWorklistEntry() { |
| // Before we do any work, remove nodes that are not in use. |
| clearAddedDanglingWorklistEntries(); |
| SDNode *N = nullptr; |
| // The Worklist holds the SDNodes in order, but it may contain null |
| // entries. |
| while (!N && !Worklist.empty()) { |
| N = Worklist.pop_back_val(); |
| } |
| |
| if (N) { |
| assert(N->getCombinerWorklistIndex() >= 0 && |
| "Found a worklist entry without a corresponding map entry!"); |
| // Set to -2 to indicate that we combined the node. |
| N->setCombinerWorklistIndex(-2); |
| } |
| return N; |
| } |
| |
| /// Call the node-specific routine that folds each particular type of node. |
| SDValue visit(SDNode *N); |
| |
| public: |
| DAGCombiner(SelectionDAG &D, BatchAAResults *BatchAA, CodeGenOptLevel OL) |
| : DAG(D), TLI(D.getTargetLoweringInfo()), |
| STI(D.getSubtarget().getSelectionDAGInfo()), OptLevel(OL), |
| BatchAA(BatchAA) { |
| ForCodeSize = DAG.shouldOptForSize(); |
| DisableGenericCombines = |
| DisableCombines || (STI && STI->disableGenericCombines(OptLevel)); |
| |
| MaximumLegalStoreInBits = 0; |
| // We use the minimum store size here, since that's all we can guarantee |
| // for the scalable vector types. |
| for (MVT VT : MVT::all_valuetypes()) |
| if (EVT(VT).isSimple() && VT != MVT::Other && |
| TLI.isTypeLegal(EVT(VT)) && |
| VT.getSizeInBits().getKnownMinValue() >= MaximumLegalStoreInBits) |
| MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinValue(); |
| } |
| |
| void ConsiderForPruning(SDNode *N) { |
| // Mark this for potential pruning. |
| PruningList.insert(N); |
| } |
| |
  /// Add to the worklist, making sure its instance is at the back (next to
  /// be processed).
| void AddToWorklist(SDNode *N, bool IsCandidateForPruning = true, |
| bool SkipIfCombinedBefore = false) { |
| assert(N->getOpcode() != ISD::DELETED_NODE && |
| "Deleted Node added to Worklist"); |
| |
| // Skip handle nodes as they can't usefully be combined and confuse the |
| // zero-use deletion strategy. |
| if (N->getOpcode() == ISD::HANDLENODE) |
| return; |
| |
| if (SkipIfCombinedBefore && N->getCombinerWorklistIndex() == -2) |
| return; |
| |
| if (IsCandidateForPruning) |
| ConsiderForPruning(N); |
| |
| if (N->getCombinerWorklistIndex() < 0) { |
| N->setCombinerWorklistIndex(Worklist.size()); |
| Worklist.push_back(N); |
| } |
| } |
| |
| /// Remove all instances of N from the worklist. |
| void removeFromWorklist(SDNode *N) { |
| PruningList.remove(N); |
| StoreRootCountMap.erase(N); |
| |
| int WorklistIndex = N->getCombinerWorklistIndex(); |
| // If not in the worklist, the index might be -1 or -2 (was combined |
| // before). As the node gets deleted anyway, there's no need to update |
| // the index. |
| if (WorklistIndex < 0) |
| return; // Not in the worklist. |
| |
| // Null out the entry rather than erasing it to avoid a linear operation. |
| Worklist[WorklistIndex] = nullptr; |
| N->setCombinerWorklistIndex(-1); |
| } |
| |
| void deleteAndRecombine(SDNode *N); |
| bool recursivelyDeleteUnusedNodes(SDNode *N); |
| |
| /// Replaces all uses of the results of one DAG node with new values. |
| SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo, |
| bool AddTo = true); |
| |
| /// Replaces all uses of the results of one DAG node with new values. |
| SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) { |
| return CombineTo(N, &Res, 1, AddTo); |
| } |
| |
| /// Replaces all uses of the results of one DAG node with new values. |
| SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, |
| bool AddTo = true) { |
| SDValue To[] = { Res0, Res1 }; |
| return CombineTo(N, To, 2, AddTo); |
| } |
| |
| void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO); |
| |
| private: |
| unsigned MaximumLegalStoreInBits; |
| |
| /// Check the specified integer node value to see if it can be simplified or |
| /// if things it uses can be simplified by bit propagation. |
| /// If so, return true. |
| bool SimplifyDemandedBits(SDValue Op) { |
| unsigned BitWidth = Op.getScalarValueSizeInBits(); |
| APInt DemandedBits = APInt::getAllOnes(BitWidth); |
| return SimplifyDemandedBits(Op, DemandedBits); |
| } |
| |
| bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits) { |
| EVT VT = Op.getValueType(); |
| APInt DemandedElts = VT.isFixedLengthVector() |
| ? APInt::getAllOnes(VT.getVectorNumElements()) |
| : APInt(1, 1); |
| return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, false); |
| } |
| |
| /// Check the specified vector node value to see if it can be simplified or |
| /// if things it uses can be simplified as it only uses some of the |
| /// elements. If so, return true. |
| bool SimplifyDemandedVectorElts(SDValue Op) { |
| // TODO: For now just pretend it cannot be simplified. |
| if (Op.getValueType().isScalableVector()) |
| return false; |
| |
| unsigned NumElts = Op.getValueType().getVectorNumElements(); |
| APInt DemandedElts = APInt::getAllOnes(NumElts); |
| return SimplifyDemandedVectorElts(Op, DemandedElts); |
| } |
| |
| bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, |
| const APInt &DemandedElts, |
| bool AssumeSingleUse = false); |
| bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts, |
| bool AssumeSingleUse = false); |
| |
| bool CombineToPreIndexedLoadStore(SDNode *N); |
| bool CombineToPostIndexedLoadStore(SDNode *N); |
| SDValue SplitIndexingFromLoad(LoadSDNode *LD); |
| bool SliceUpLoad(SDNode *N); |
| |
  // Looks up the chain to find a unique (unaliased) store feeding the passed
  // load. If no such store is found, returns a nullptr.
  // Note: This will look past a CALLSEQ_START if the load is chained to it,
  // so that it can find stack stores for byval params.
| StoreSDNode *getUniqueStoreFeeding(LoadSDNode *LD, int64_t &Offset); |
| // Scalars have size 0 to distinguish from singleton vectors. |
| SDValue ForwardStoreValueToDirectLoad(LoadSDNode *LD); |
| bool getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val); |
| bool extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val); |
| |
| void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad); |
| SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace); |
| SDValue SExtPromoteOperand(SDValue Op, EVT PVT); |
| SDValue ZExtPromoteOperand(SDValue Op, EVT PVT); |
| SDValue PromoteIntBinOp(SDValue Op); |
| SDValue PromoteIntShiftOp(SDValue Op); |
| SDValue PromoteExtend(SDValue Op); |
| bool PromoteLoad(SDValue Op); |
| |
| SDValue foldShiftToAvg(SDNode *N); |
| // Fold `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)` |
| SDValue foldBitwiseOpWithNeg(SDNode *N, const SDLoc &DL, EVT VT); |
| |
| SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, |
| SDValue RHS, SDValue True, SDValue False, |
| ISD::CondCode CC); |
| |
| /// Call the node-specific routine that knows how to fold each |
| /// particular type of node. If that doesn't do anything, try the |
| /// target-specific DAG combines. |
| SDValue combine(SDNode *N); |
| |
| // Visitation implementation - Implement dag node combining for different |
| // node types. The semantics are as follows: |
| // Return Value: |
| // SDValue.getNode() == 0 - No change was made |
| // SDValue.getNode() == N - N was replaced, is dead and has been handled. |
| // otherwise - N should be replaced by the returned Operand. |
| // |
| SDValue visitTokenFactor(SDNode *N); |
| SDValue visitMERGE_VALUES(SDNode *N); |
| SDValue visitADD(SDNode *N); |
| SDValue visitADDLike(SDNode *N); |
| SDValue visitADDLikeCommutative(SDValue N0, SDValue N1, |
| SDNode *LocReference); |
| SDValue visitSUB(SDNode *N); |
| SDValue visitADDSAT(SDNode *N); |
| SDValue visitSUBSAT(SDNode *N); |
| SDValue visitADDC(SDNode *N); |
| SDValue visitADDO(SDNode *N); |
| SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N); |
| SDValue visitSUBC(SDNode *N); |
| SDValue visitSUBO(SDNode *N); |
| SDValue visitADDE(SDNode *N); |
| SDValue visitUADDO_CARRY(SDNode *N); |
| SDValue visitSADDO_CARRY(SDNode *N); |
| SDValue visitUADDO_CARRYLike(SDValue N0, SDValue N1, SDValue CarryIn, |
| SDNode *N); |
| SDValue visitSADDO_CARRYLike(SDValue N0, SDValue N1, SDValue CarryIn, |
| SDNode *N); |
| SDValue visitSUBE(SDNode *N); |
| SDValue visitUSUBO_CARRY(SDNode *N); |
| SDValue visitSSUBO_CARRY(SDNode *N); |
| template <class MatchContextClass> SDValue visitMUL(SDNode *N); |
| SDValue visitMULFIX(SDNode *N); |
| SDValue useDivRem(SDNode *N); |
| SDValue visitSDIV(SDNode *N); |
| SDValue visitSDIVLike(SDValue N0, SDValue N1, SDNode *N); |
| SDValue visitUDIV(SDNode *N); |
| SDValue visitUDIVLike(SDValue N0, SDValue N1, SDNode *N); |
| SDValue visitREM(SDNode *N); |
| SDValue visitMULHU(SDNode *N); |
| SDValue visitMULHS(SDNode *N); |
| SDValue visitAVG(SDNode *N); |
| SDValue visitABD(SDNode *N); |
| SDValue visitSMUL_LOHI(SDNode *N); |
| SDValue visitUMUL_LOHI(SDNode *N); |
| SDValue visitMULO(SDNode *N); |
| SDValue visitIMINMAX(SDNode *N); |
| SDValue visitAND(SDNode *N); |
| SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *N); |
| SDValue visitOR(SDNode *N); |
| SDValue visitORLike(SDValue N0, SDValue N1, const SDLoc &DL); |
| SDValue visitXOR(SDNode *N); |
| SDValue SimplifyVCastOp(SDNode *N, const SDLoc &DL); |
| SDValue SimplifyVBinOp(SDNode *N, const SDLoc &DL); |
| SDValue visitSHL(SDNode *N); |
| SDValue visitSRA(SDNode *N); |
| SDValue visitSRL(SDNode *N); |
| SDValue visitFunnelShift(SDNode *N); |
| SDValue visitSHLSAT(SDNode *N); |
| SDValue visitRotate(SDNode *N); |
| SDValue visitABS(SDNode *N); |
| SDValue visitBSWAP(SDNode *N); |
| SDValue visitBITREVERSE(SDNode *N); |
| SDValue visitCTLZ(SDNode *N); |
| SDValue visitCTLZ_ZERO_UNDEF(SDNode *N); |
| SDValue visitCTTZ(SDNode *N); |
| SDValue visitCTTZ_ZERO_UNDEF(SDNode *N); |
| SDValue visitCTPOP(SDNode *N); |
| SDValue visitSELECT(SDNode *N); |
| SDValue visitVSELECT(SDNode *N); |
| SDValue visitVP_SELECT(SDNode *N); |
| SDValue visitSELECT_CC(SDNode *N); |
| SDValue visitSETCC(SDNode *N); |
| SDValue visitSETCCCARRY(SDNode *N); |
| SDValue visitSIGN_EXTEND(SDNode *N); |
| SDValue visitZERO_EXTEND(SDNode *N); |
| SDValue visitANY_EXTEND(SDNode *N); |
| SDValue visitAssertExt(SDNode *N); |
| SDValue visitAssertAlign(SDNode *N); |
| SDValue visitSIGN_EXTEND_INREG(SDNode *N); |
| SDValue visitEXTEND_VECTOR_INREG(SDNode *N); |
| SDValue visitTRUNCATE(SDNode *N); |
| SDValue visitTRUNCATE_USAT_U(SDNode *N); |
| SDValue visitBITCAST(SDNode *N); |
| SDValue visitFREEZE(SDNode *N); |
| SDValue visitBUILD_PAIR(SDNode *N); |
| SDValue visitFADD(SDNode *N); |
| SDValue visitVP_FADD(SDNode *N); |
| SDValue visitVP_FSUB(SDNode *N); |
| SDValue visitSTRICT_FADD(SDNode *N); |
| SDValue visitFSUB(SDNode *N); |
| SDValue visitFMUL(SDNode *N); |
| template <class MatchContextClass> SDValue visitFMA(SDNode *N); |
| SDValue visitFMAD(SDNode *N); |
| SDValue visitFDIV(SDNode *N); |
| SDValue visitFREM(SDNode *N); |
| SDValue visitFSQRT(SDNode *N); |
| SDValue visitFCOPYSIGN(SDNode *N); |
| SDValue visitFPOW(SDNode *N); |
| SDValue visitFCANONICALIZE(SDNode *N); |
| SDValue visitSINT_TO_FP(SDNode *N); |
| SDValue visitUINT_TO_FP(SDNode *N); |
| SDValue visitFP_TO_SINT(SDNode *N); |
| SDValue visitFP_TO_UINT(SDNode *N); |
| SDValue visitXROUND(SDNode *N); |
| SDValue visitFP_ROUND(SDNode *N); |
| SDValue visitFP_EXTEND(SDNode *N); |
| SDValue visitFNEG(SDNode *N); |
| SDValue visitFABS(SDNode *N); |
| SDValue visitFCEIL(SDNode *N); |
| SDValue visitFTRUNC(SDNode *N); |
| SDValue visitFFREXP(SDNode *N); |
| SDValue visitFFLOOR(SDNode *N); |
| SDValue visitFMinMax(SDNode *N); |
| SDValue visitBRCOND(SDNode *N); |
| SDValue visitBR_CC(SDNode *N); |
| SDValue visitLOAD(SDNode *N); |
| |
| SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain); |
| SDValue replaceStoreOfFPConstant(StoreSDNode *ST); |
| SDValue replaceStoreOfInsertLoad(StoreSDNode *ST); |
| |
| bool refineExtractVectorEltIntoMultipleNarrowExtractVectorElts(SDNode *N); |
| |
| SDValue visitSTORE(SDNode *N); |
| SDValue visitATOMIC_STORE(SDNode *N); |
| SDValue visitLIFETIME_END(SDNode *N); |
| SDValue visitINSERT_VECTOR_ELT(SDNode *N); |
| SDValue visitEXTRACT_VECTOR_ELT(SDNode *N); |
| SDValue visitBUILD_VECTOR(SDNode *N); |
| SDValue visitCONCAT_VECTORS(SDNode *N); |
| SDValue visitEXTRACT_SUBVECTOR(SDNode *N); |
| SDValue visitVECTOR_SHUFFLE(SDNode *N); |
| SDValue visitSCALAR_TO_VECTOR(SDNode *N); |
| SDValue visitINSERT_SUBVECTOR(SDNode *N); |
| SDValue visitVECTOR_COMPRESS(SDNode *N); |
| SDValue visitMLOAD(SDNode *N); |
| SDValue visitMSTORE(SDNode *N); |
| SDValue visitMGATHER(SDNode *N); |
| SDValue visitMSCATTER(SDNode *N); |
| SDValue visitMHISTOGRAM(SDNode *N); |
| SDValue visitPARTIAL_REDUCE_MLA(SDNode *N); |
| SDValue visitVPGATHER(SDNode *N); |
| SDValue visitVPSCATTER(SDNode *N); |
| SDValue visitVP_STRIDED_LOAD(SDNode *N); |
| SDValue visitVP_STRIDED_STORE(SDNode *N); |
| SDValue visitFP_TO_FP16(SDNode *N); |
| SDValue visitFP16_TO_FP(SDNode *N); |
| SDValue visitFP_TO_BF16(SDNode *N); |
| SDValue visitBF16_TO_FP(SDNode *N); |
| SDValue visitVECREDUCE(SDNode *N); |
| SDValue visitVPOp(SDNode *N); |
| SDValue visitGET_FPENV_MEM(SDNode *N); |
| SDValue visitSET_FPENV_MEM(SDNode *N); |
| |
| template <class MatchContextClass> |
| SDValue visitFADDForFMACombine(SDNode *N); |
| template <class MatchContextClass> |
| SDValue visitFSUBForFMACombine(SDNode *N); |
| SDValue visitFMULForFMADistributiveCombine(SDNode *N); |
| |
| SDValue XformToShuffleWithZero(SDNode *N); |
| bool reassociationCanBreakAddressingModePattern(unsigned Opc, |
| const SDLoc &DL, |
| SDNode *N, |
| SDValue N0, |
| SDValue N1); |
| SDValue reassociateOpsCommutative(unsigned Opc, const SDLoc &DL, SDValue N0, |
| SDValue N1, SDNodeFlags Flags); |
| SDValue reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0, |
| SDValue N1, SDNodeFlags Flags); |
| SDValue reassociateReduction(unsigned RedOpc, unsigned Opc, const SDLoc &DL, |
| EVT VT, SDValue N0, SDValue N1, |
| SDNodeFlags Flags = SDNodeFlags()); |
| |
| SDValue visitShiftByConstant(SDNode *N); |
| |
| SDValue foldSelectOfConstants(SDNode *N); |
| SDValue foldVSelectOfConstants(SDNode *N); |
| SDValue foldBinOpIntoSelect(SDNode *BO); |
| bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS); |
| SDValue hoistLogicOpWithSameOpcodeHands(SDNode *N); |
| SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2); |
| SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1, |
| SDValue N2, SDValue N3, ISD::CondCode CC, |
| bool NotExtCompare = false); |
| SDValue convertSelectOfFPConstantsToLoadOffset( |
| const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3, |
| ISD::CondCode CC); |
| SDValue foldSignChangeInBitcast(SDNode *N); |
| SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1, |
| SDValue N2, SDValue N3, ISD::CondCode CC); |
| SDValue foldSelectOfBinops(SDNode *N); |
| SDValue foldSextSetcc(SDNode *N); |
| SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1, |
| const SDLoc &DL); |
| SDValue foldSubToUSubSat(EVT DstVT, SDNode *N, const SDLoc &DL); |
| SDValue foldABSToABD(SDNode *N, const SDLoc &DL); |
| SDValue foldSelectToABD(SDValue LHS, SDValue RHS, SDValue True, |
| SDValue False, ISD::CondCode CC, const SDLoc &DL); |
| SDValue unfoldMaskedMerge(SDNode *N); |
| SDValue unfoldExtremeBitClearingToShifts(SDNode *N); |
| SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, |
| const SDLoc &DL, bool foldBooleans); |
| SDValue rebuildSetCC(SDValue N); |
| |
| bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS, |
| SDValue &CC, bool MatchStrict = false) const; |
| bool isOneUseSetCC(SDValue N) const; |
| |
| SDValue foldAddToAvg(SDNode *N, const SDLoc &DL); |
| SDValue foldSubToAvg(SDNode *N, const SDLoc &DL); |
| |
| SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, |
| unsigned HiOp); |
| SDValue CombineConsecutiveLoads(SDNode *N, EVT VT); |
| SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG, |
| const TargetLowering &TLI); |
| SDValue foldPartialReduceMLAMulOp(SDNode *N); |
| SDValue foldPartialReduceAdd(SDNode *N); |
| |
| SDValue CombineExtLoad(SDNode *N); |
| SDValue CombineZExtLogicopShiftLoad(SDNode *N); |
| SDValue combineRepeatedFPDivisors(SDNode *N); |
| SDValue combineFMulOrFDivWithIntPow2(SDNode *N); |
| SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf); |
| SDValue mergeInsertEltWithShuffle(SDNode *N, unsigned InsIndex); |
| SDValue combineInsertEltToShuffle(SDNode *N, unsigned InsIndex); |
| SDValue combineInsertEltToLoad(SDNode *N, unsigned InsIndex); |
| SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT); |
| SDValue BuildSDIV(SDNode *N); |
| SDValue BuildSDIVPow2(SDNode *N); |
| SDValue BuildUDIV(SDNode *N); |
| SDValue BuildSREMPow2(SDNode *N); |
| SDValue buildOptimizedSREM(SDValue N0, SDValue N1, SDNode *N); |
| SDValue BuildLogBase2(SDValue V, const SDLoc &DL, |
| bool KnownNeverZero = false, |
| bool InexpensiveOnly = false, |
| std::optional<EVT> OutVT = std::nullopt); |
| SDValue BuildDivEstimate(SDValue N, SDValue Op, SDNodeFlags Flags); |
| SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags); |
| SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags Flags); |
| SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags, bool Recip); |
| SDValue buildSqrtNROneConst(SDValue Arg, SDValue Est, unsigned Iterations, |
| SDNodeFlags Flags, bool Reciprocal); |
| SDValue buildSqrtNRTwoConst(SDValue Arg, SDValue Est, unsigned Iterations, |
| SDNodeFlags Flags, bool Reciprocal); |
| SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1, |
| bool DemandHighBits = true); |
| SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1); |
| SDValue MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg, |
| SDValue InnerPos, SDValue InnerNeg, bool FromAdd, |
| bool HasPos, unsigned PosOpcode, |
| unsigned NegOpcode, const SDLoc &DL); |
| SDValue MatchFunnelPosNeg(SDValue N0, SDValue N1, SDValue Pos, SDValue Neg, |
| SDValue InnerPos, SDValue InnerNeg, bool FromAdd, |
| bool HasPos, unsigned PosOpcode, |
| unsigned NegOpcode, const SDLoc &DL); |
| SDValue MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL, |
| bool FromAdd); |
| SDValue MatchLoadCombine(SDNode *N); |
| SDValue mergeTruncStores(StoreSDNode *N); |
| SDValue reduceLoadWidth(SDNode *N); |
| SDValue ReduceLoadOpStoreWidth(SDNode *N); |
| SDValue splitMergedValStore(StoreSDNode *ST); |
| SDValue TransformFPLoadStorePair(SDNode *N); |
| SDValue convertBuildVecZextToZext(SDNode *N); |
| SDValue convertBuildVecZextToBuildVecWithZeros(SDNode *N); |
| SDValue reduceBuildVecExtToExtBuildVec(SDNode *N); |
| SDValue reduceBuildVecTruncToBitCast(SDNode *N); |
| SDValue reduceBuildVecToShuffle(SDNode *N); |
| SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N, |
| ArrayRef<int> VectorMask, SDValue VecIn1, |
| SDValue VecIn2, unsigned LeftIdx, |
| bool DidSplitVec); |
| SDValue matchVSelectOpSizesWithSetCC(SDNode *Cast); |
| |
| /// Walk up chain skipping non-aliasing memory nodes, |
| /// looking for aliasing nodes and adding them to the Aliases vector. |
| void GatherAllAliases(SDNode *N, SDValue OriginalChain, |
| SmallVectorImpl<SDValue> &Aliases); |
| |
| /// Return true if there is any possibility that the two addresses overlap. |
| bool mayAlias(SDNode *Op0, SDNode *Op1) const; |
| |
  /// Walk up chain skipping non-aliasing memory nodes, looking for a better
  /// chain (aliasing node).
| SDValue FindBetterChain(SDNode *N, SDValue Chain); |
| |
| /// Try to replace a store and any possibly adjacent stores on |
| /// consecutive chains with better chains. Return true only if St is |
| /// replaced. |
| /// |
| /// Notice that other chains may still be replaced even if the function |
| /// returns false. |
| bool findBetterNeighborChains(StoreSDNode *St); |
| |
  // Helper for findBetterNeighborChains. Walk up the store chain and add
  // additional chained stores that do not overlap and can be parallelized.
| bool parallelizeChainedStores(StoreSDNode *St); |
| |
| /// Holds a pointer to an LSBaseSDNode as well as information on where it |
| /// is located in a sequence of memory operations connected by a chain. |
| struct MemOpLink { |
| // Ptr to the mem node. |
| LSBaseSDNode *MemNode; |
| |
| // Offset from the base ptr. |
| int64_t OffsetFromBase; |
| |
| MemOpLink(LSBaseSDNode *N, int64_t Offset) |
| : MemNode(N), OffsetFromBase(Offset) {} |
| }; |
| |
| // Classify the origin of a stored value. |
| enum class StoreSource { Unknown, Constant, Extract, Load }; |
| StoreSource getStoreSource(SDValue StoreVal) { |
| switch (StoreVal.getOpcode()) { |
| case ISD::Constant: |
| case ISD::ConstantFP: |
| return StoreSource::Constant; |
| case ISD::BUILD_VECTOR: |
| if (ISD::isBuildVectorOfConstantSDNodes(StoreVal.getNode()) || |
| ISD::isBuildVectorOfConstantFPSDNodes(StoreVal.getNode())) |
| return StoreSource::Constant; |
| return StoreSource::Unknown; |
| case ISD::EXTRACT_VECTOR_ELT: |
| case ISD::EXTRACT_SUBVECTOR: |
| return StoreSource::Extract; |
| case ISD::LOAD: |
| return StoreSource::Load; |
| default: |
| return StoreSource::Unknown; |
| } |
| } |
| |
| /// This is a helper function for visitMUL to check the profitability |
| /// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2). |
| /// MulNode is the original multiply, AddNode is (add x, c1), |
| /// and ConstNode is c2. |
| bool isMulAddWithConstProfitable(SDNode *MulNode, SDValue AddNode, |
| SDValue ConstNode); |
| |
  /// This is a helper function for visitAND and visitZERO_EXTEND. Returns
  /// true if the (and (load x) c) pattern matches an extload. ExtVT is set
  /// to the type of the loaded value to be extended.
| bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN, |
| EVT LoadResultTy, EVT &ExtVT); |
| |
  /// Helper function to calculate whether the given Load/Store can have its
  /// width reduced to MemVT.
| bool isLegalNarrowLdSt(LSBaseSDNode *LDSTN, ISD::LoadExtType ExtType, |
| EVT &MemVT, unsigned ShAmt = 0); |
| |
| /// Used by BackwardsPropagateMask to find suitable loads. |
| bool SearchForAndLoads(SDNode *N, SmallVectorImpl<LoadSDNode*> &Loads, |
| SmallPtrSetImpl<SDNode*> &NodesWithConsts, |
| ConstantSDNode *Mask, SDNode *&NodeToMask); |
| /// Attempt to propagate a given AND node back to load leaves so that they |
| /// can be combined into narrow loads. |
| bool BackwardsPropagateMask(SDNode *N); |
| |
| /// Helper function for mergeConsecutiveStores which merges the component |
| /// store chains. |
| SDValue getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes, |
| unsigned NumStores); |
| |
| /// Helper function for mergeConsecutiveStores which checks if all the store |
| /// nodes have the same underlying object. We can still reuse the first |
| /// store's pointer info if all the stores are from the same object. |
| bool hasSameUnderlyingObj(ArrayRef<MemOpLink> StoreNodes); |
| |
| /// This is a helper function for mergeConsecutiveStores. When the source |
| /// elements of the consecutive stores are all constants or all extracted |
| /// vector elements, try to merge them into one larger store introducing |
| /// bitcasts if necessary. \return True if a merged store was created. |
| bool mergeStoresOfConstantsOrVecElts(SmallVectorImpl<MemOpLink> &StoreNodes, |
| EVT MemVT, unsigned NumStores, |
| bool IsConstantSrc, bool UseVector, |
| bool UseTrunc); |
| |
  /// This is a helper function for mergeConsecutiveStores. Stores that may
  /// potentially be merged with St are placed in StoreNodes. On success,
  /// returns a chain predecessor to all store candidates.
| SDNode *getStoreMergeCandidates(StoreSDNode *St, |
| SmallVectorImpl<MemOpLink> &StoreNodes); |
| |
| /// Helper function for mergeConsecutiveStores. Checks if candidate stores |
| /// have indirect dependency through their operands. RootNode is the |
| /// predecessor to all stores calculated by getStoreMergeCandidates and is |
| /// used to prune the dependency check. \return True if safe to merge. |
| bool checkMergeStoreCandidatesForDependencies( |
| SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores, |
| SDNode *RootNode); |
| |
| /// Helper function for tryStoreMergeOfLoads. Checks if the load/store |
| /// chain has a call in it. \return True if a call is found. |
| bool hasCallInLdStChain(StoreSDNode *St, LoadSDNode *Ld); |
| |
| /// This is a helper function for mergeConsecutiveStores. Given a list of |
| /// store candidates, find the first N that are consecutive in memory. |
| /// Returns 0 if there are not at least 2 consecutive stores to try merging. |
| unsigned getConsecutiveStores(SmallVectorImpl<MemOpLink> &StoreNodes, |
| int64_t ElementSizeBytes) const; |
| |
| /// This is a helper function for mergeConsecutiveStores. It is used for |
| /// store chains that are composed entirely of constant values. |
| bool tryStoreMergeOfConstants(SmallVectorImpl<MemOpLink> &StoreNodes, |
| unsigned NumConsecutiveStores, |
| EVT MemVT, SDNode *Root, bool AllowVectors); |
| |
| /// This is a helper function for mergeConsecutiveStores. It is used for |
| /// store chains that are composed entirely of extracted vector elements. |
| /// When extracting multiple vector elements, try to store them in one |
| /// vector store rather than a sequence of scalar stores. |
| bool tryStoreMergeOfExtracts(SmallVectorImpl<MemOpLink> &StoreNodes, |
| unsigned NumConsecutiveStores, EVT MemVT, |
| SDNode *Root); |
| |
| /// This is a helper function for mergeConsecutiveStores. It is used for |
| /// store chains that are composed entirely of loaded values. |
| bool tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes, |
| unsigned NumConsecutiveStores, EVT MemVT, |
| SDNode *Root, bool AllowVectors, |
| bool IsNonTemporalStore, bool IsNonTemporalLoad); |
| |
| /// Merge consecutive store operations into a wide store. |
| /// This optimization uses wide integers or vectors when possible. |
| /// \return true if stores were merged. |
| bool mergeConsecutiveStores(StoreSDNode *St); |
| |
| /// Try to transform a truncation where C is a constant: |
| /// (trunc (and X, C)) -> (and (trunc X), (trunc C)) |
| /// |
  /// \p N needs to be a truncation and its first operand an AND. Other
  /// requirements are checked by the function (e.g. that trunc is
  /// single-use); if any requirement is not met, an empty SDValue is
  /// returned.
| SDValue distributeTruncateThroughAnd(SDNode *N); |
| |
  /// Helper function to determine whether the target supports the operation
  /// given by \p Opcode for type \p VT, that is, whether the operation
  /// is legal or custom before legalizing operations, and whether it is
  /// legal (but not custom) after legalization.
| bool hasOperation(unsigned Opcode, EVT VT) { |
| return TLI.isOperationLegalOrCustom(Opcode, VT, LegalOperations); |
| } |
| |
| bool hasUMin(EVT VT) const { |
| auto LK = TLI.getTypeConversion(*DAG.getContext(), VT); |
| return (LK.first == TargetLoweringBase::TypeLegal || |
| LK.first == TargetLoweringBase::TypePromoteInteger) && |
| TLI.isOperationLegal(ISD::UMIN, LK.second); |
| } |
| |
| public: |
  /// Runs the DAG combiner on all nodes in the worklist.
| void Run(CombineLevel AtLevel); |
| |
| SelectionDAG &getDAG() const { return DAG; } |
| |
| /// Convenience wrapper around TargetLowering::getShiftAmountTy. |
| EVT getShiftAmountTy(EVT LHSTy) { |
| return TLI.getShiftAmountTy(LHSTy, DAG.getDataLayout()); |
| } |
| |
| /// This method returns true if we are running before type legalization or |
| /// if the specified VT is legal. |
| bool isTypeLegal(const EVT &VT) { |
| if (!LegalTypes) return true; |
| return TLI.isTypeLegal(VT); |
| } |
| |
| /// Convenience wrapper around TargetLowering::getSetCCResultType |
| EVT getSetCCResultType(EVT VT) const { |
| return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); |
| } |
| |
| void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs, |
| SDValue OrigLoad, SDValue ExtLoad, |
| ISD::NodeType ExtType); |
| }; |
| |
| /// This class is a DAGUpdateListener that removes any deleted |
| /// nodes from the worklist. |
| class WorklistRemover : public SelectionDAG::DAGUpdateListener { |
| DAGCombiner &DC; |
| |
| public: |
| explicit WorklistRemover(DAGCombiner &dc) |
| : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {} |
| |
| void NodeDeleted(SDNode *N, SDNode *E) override { |
| DC.removeFromWorklist(N); |
| } |
| }; |
| |
| class WorklistInserter : public SelectionDAG::DAGUpdateListener { |
| DAGCombiner &DC; |
| |
| public: |
| explicit WorklistInserter(DAGCombiner &dc) |
| : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {} |
| |
| // FIXME: Ideally we could add N to the worklist, but this causes exponential |
| // compile time costs in large DAGs, e.g. Halide. |
| void NodeInserted(SDNode *N) override { DC.ConsiderForPruning(N); } |
| }; |
| |
| } // end anonymous namespace |
| |
| //===----------------------------------------------------------------------===// |
| // TargetLowering::DAGCombinerInfo implementation |
| //===----------------------------------------------------------------------===// |
| |
| void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) { |
| ((DAGCombiner*)DC)->AddToWorklist(N); |
| } |
| |
| SDValue TargetLowering::DAGCombinerInfo:: |
| CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) { |
| return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo); |
| } |
| |
| SDValue TargetLowering::DAGCombinerInfo:: |
| CombineTo(SDNode *N, SDValue Res, bool AddTo) { |
| return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo); |
| } |
| |
| SDValue TargetLowering::DAGCombinerInfo:: |
| CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) { |
| return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo); |
| } |
| |
| bool TargetLowering::DAGCombinerInfo:: |
| recursivelyDeleteUnusedNodes(SDNode *N) { |
| return ((DAGCombiner*)DC)->recursivelyDeleteUnusedNodes(N); |
| } |
| |
| void TargetLowering::DAGCombinerInfo:: |
| CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) { |
| return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO); |
| } |
| |
| //===----------------------------------------------------------------------===// |
| // Helper Functions |
| //===----------------------------------------------------------------------===// |
| |
| void DAGCombiner::deleteAndRecombine(SDNode *N) { |
| removeFromWorklist(N); |
| |
| // If the operands of this node are only used by the node, they will now be |
| // dead. Make sure to re-visit them and recursively delete dead nodes. |
| for (const SDValue &Op : N->ops()) |
    // For an operand generating multiple values, one of the values may
    // become dead, allowing further simplification (e.g. split index
    // arithmetic from an indexed load).
| if (Op->hasOneUse() || Op->getNumValues() > 1) |
| AddToWorklist(Op.getNode()); |
| |
| DAG.DeleteNode(N); |
| } |
| |
// APInts must be the same size for most operations; this helper
// function zero extends the shorter of the pair so that they match.
// We provide an Offset so that we can create bitwidths that won't overflow.
| static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) { |
| unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth()); |
| LHS = LHS.zext(Bits); |
| RHS = RHS.zext(Bits); |
| } |
| |
| // Return true if this node is a setcc, or is a select_cc |
| // that selects between the target values used for true and false, making it |
| // equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to |
| // the appropriate nodes based on the type of node we are checking. This |
| // simplifies life a bit for the callers. |
| bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS, |
| SDValue &CC, bool MatchStrict) const { |
| if (N.getOpcode() == ISD::SETCC) { |
| LHS = N.getOperand(0); |
| RHS = N.getOperand(1); |
| CC = N.getOperand(2); |
| return true; |
| } |
| |
| if (MatchStrict && |
| (N.getOpcode() == ISD::STRICT_FSETCC || |
| N.getOpcode() == ISD::STRICT_FSETCCS)) { |
| LHS = N.getOperand(1); |
| RHS = N.getOperand(2); |
| CC = N.getOperand(3); |
| return true; |
| } |
| |
| if (N.getOpcode() != ISD::SELECT_CC || !TLI.isConstTrueVal(N.getOperand(2)) || |
| !TLI.isConstFalseVal(N.getOperand(3))) |
| return false; |
| |
| if (TLI.getBooleanContents(N.getValueType()) == |
| TargetLowering::UndefinedBooleanContent) |
| return false; |
| |
| LHS = N.getOperand(0); |
| RHS = N.getOperand(1); |
| CC = N.getOperand(4); |
| return true; |
| } |
| |
| /// Return true if this is a SetCC-equivalent operation with only one use. |
| /// If this is true, it allows the users to invert the operation for free when |
| /// it is profitable to do so. |
| bool DAGCombiner::isOneUseSetCC(SDValue N) const { |
| SDValue N0, N1, N2; |
| if (isSetCCEquivalent(N, N0, N1, N2) && N->hasOneUse()) |
| return true; |
| return false; |
| } |
| |
| static bool isConstantSplatVectorMaskForType(SDNode *N, EVT ScalarTy) { |
| if (!ScalarTy.isSimple()) |
| return false; |
| |
| uint64_t MaskForTy = 0ULL; |
| switch (ScalarTy.getSimpleVT().SimpleTy) { |
| case MVT::i8: |
| MaskForTy = 0xFFULL; |
| break; |
| case MVT::i16: |
| MaskForTy = 0xFFFFULL; |
| break; |
| case MVT::i32: |
| MaskForTy = 0xFFFFFFFFULL; |
| break; |
  default:
    return false;
| } |
| |
| APInt Val; |
| if (ISD::isConstantSplatVector(N, Val)) |
| return Val.getLimitedValue() == MaskForTy; |
| |
| return false; |
| } |
| |
| // Determines if it is a constant integer or a splat/build vector of constant |
| // integers (and undefs). |
| // Do not permit build vector implicit truncation. |
| static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) { |
| if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N)) |
| return !(Const->isOpaque() && NoOpaques); |
| if (N.getOpcode() != ISD::BUILD_VECTOR && N.getOpcode() != ISD::SPLAT_VECTOR) |
| return false; |
| unsigned BitWidth = N.getScalarValueSizeInBits(); |
| for (const SDValue &Op : N->op_values()) { |
| if (Op.isUndef()) |
| continue; |
| ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op); |
| if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth || |
| (Const->isOpaque() && NoOpaques)) |
| return false; |
| } |
| return true; |
| } |
| |
// Determines if a BUILD_VECTOR is composed of all-constants possibly mixed
// with undefs.
| static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques = false) { |
| if (V.getOpcode() != ISD::BUILD_VECTOR) |
| return false; |
| return isConstantOrConstantVector(V, NoOpaques) || |
| ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()); |
| } |
| |
// Determine if the indexing can be split from this indexed load, i.e.
// splitting is enabled and the index is not an opaque target constant.
| static bool canSplitIdx(LoadSDNode *LD) { |
| return MaySplitLoadIndex && |
| (LD->getOperand(2).getOpcode() != ISD::TargetConstant || |
| !cast<ConstantSDNode>(LD->getOperand(2))->isOpaque()); |
| } |
| |
| bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc, |
| const SDLoc &DL, |
| SDNode *N, |
| SDValue N0, |
| SDValue N1) { |
| // Currently this only tries to ensure we don't undo the GEP splits done by |
| // CodeGenPrepare when shouldConsiderGEPOffsetSplit is true. To ensure this, |
| // we check if the following transformation would be problematic: |
| // (load/store (add, (add, x, offset1), offset2)) -> |
| // (load/store (add, x, offset1+offset2)). |
| |
| // (load/store (add, (add, x, y), offset2)) -> |
| // (load/store (add, (add, x, offset2), y)). |
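  //
  // As a concrete illustration of the first transformation (on a
  // hypothetical target): if offset2 alone fits the target's legal immediate
  // range but offset1+offset2 does not, folding the constants together would
  // force the combined offset into a register and undo a split that
  // CodeGenPrepare made deliberately.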
| |
| if (!N0.isAnyAdd()) |
| return false; |
| |
| // Check for vscale addressing modes. |
| // (load/store (add/sub (add x, y), vscale)) |
| // (load/store (add/sub (add x, y), (lsl vscale, C))) |
| // (load/store (add/sub (add x, y), (mul vscale, C))) |
| if ((N1.getOpcode() == ISD::VSCALE || |
| ((N1.getOpcode() == ISD::SHL || N1.getOpcode() == ISD::MUL) && |
| N1.getOperand(0).getOpcode() == ISD::VSCALE && |
| isa<ConstantSDNode>(N1.getOperand(1)))) && |
| N1.getValueType().getFixedSizeInBits() <= 64) { |
| int64_t ScalableOffset = N1.getOpcode() == ISD::VSCALE |
| ? N1.getConstantOperandVal(0) |
| : (N1.getOperand(0).getConstantOperandVal(0) * |
| (N1.getOpcode() == ISD::SHL |
| ? (1LL << N1.getConstantOperandVal(1)) |
| : N1.getConstantOperandVal(1))); |
| if (Opc == ISD::SUB) |
| ScalableOffset = -ScalableOffset; |
| if (all_of(N->users(), [&](SDNode *Node) { |
| if (auto *LoadStore = dyn_cast<MemSDNode>(Node); |
| LoadStore && LoadStore->getBasePtr().getNode() == N) { |
| TargetLoweringBase::AddrMode AM; |
| AM.HasBaseReg = true; |
| AM.ScalableOffset = ScalableOffset; |
| EVT VT = LoadStore->getMemoryVT(); |
| unsigned AS = LoadStore->getAddressSpace(); |
| Type *AccessTy = VT.getTypeForEVT(*DAG.getContext()); |
| return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, |
| AS); |
| } |
| return false; |
| })) |
| return true; |
| } |
| |
| if (Opc != ISD::ADD) |
| return false; |
| |
| auto *C2 = dyn_cast<ConstantSDNode>(N1); |
| if (!C2) |
| return false; |
| |
| const APInt &C2APIntVal = C2->getAPIntValue(); |
| if (C2APIntVal.getSignificantBits() > 64) |
| return false; |
| |
| if (auto *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { |
| if (N0.hasOneUse()) |
| return false; |
| |
| const APInt &C1APIntVal = C1->getAPIntValue(); |
| const APInt CombinedValueIntVal = C1APIntVal + C2APIntVal; |
| if (CombinedValueIntVal.getSignificantBits() > 64) |
| return false; |
| const int64_t CombinedValue = CombinedValueIntVal.getSExtValue(); |
| |
| for (SDNode *Node : N->users()) { |
| if (auto *LoadStore = dyn_cast<MemSDNode>(Node)) { |
| // Is x[offset2] already not a legal addressing mode? If so then |
| // reassociating the constants breaks nothing (we test offset2 because |
| // that's the one we hope to fold into the load or store). |
| TargetLoweringBase::AddrMode AM; |
| AM.HasBaseReg = true; |
| AM.BaseOffs = C2APIntVal.getSExtValue(); |
| EVT VT = LoadStore->getMemoryVT(); |
| unsigned AS = LoadStore->getAddressSpace(); |
| Type *AccessTy = VT.getTypeForEVT(*DAG.getContext()); |
| if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS)) |
| continue; |
| |
| // Would x[offset1+offset2] still be a legal addressing mode? |
| AM.BaseOffs = CombinedValue; |
| if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS)) |
| return true; |
| } |
| } |
| } else { |
| if (auto *GA = dyn_cast<GlobalAddressSDNode>(N0.getOperand(1))) |
| if (GA->getOpcode() == ISD::GlobalAddress && TLI.isOffsetFoldingLegal(GA)) |
| return false; |
| |
| for (SDNode *Node : N->users()) { |
| auto *LoadStore = dyn_cast<MemSDNode>(Node); |
| if (!LoadStore) |
| return false; |
| |
      // Is x[offset2] a legal addressing mode? If so, then reassociating
      // the constants would break the addressing-mode pattern.
| TargetLoweringBase::AddrMode AM; |
| AM.HasBaseReg = true; |
| AM.BaseOffs = C2APIntVal.getSExtValue(); |
| EVT VT = LoadStore->getMemoryVT(); |
| unsigned AS = LoadStore->getAddressSpace(); |
| Type *AccessTy = VT.getTypeForEVT(*DAG.getContext()); |
| if (!TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS)) |
| return false; |
| } |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /// Helper for DAGCombiner::reassociateOps. Try to reassociate (Opc N0, N1) if |
| /// \p N0 is the same kind of operation as \p Opc. |
| SDValue DAGCombiner::reassociateOpsCommutative(unsigned Opc, const SDLoc &DL, |
| SDValue N0, SDValue N1, |
| SDNodeFlags Flags) { |
| EVT VT = N0.getValueType(); |
| |
| if (N0.getOpcode() != Opc) |
| return SDValue(); |
| |
| SDValue N00 = N0.getOperand(0); |
| SDValue N01 = N0.getOperand(1); |
| |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N01)) { |
| SDNodeFlags NewFlags; |
| if (N0.getOpcode() == ISD::ADD && N0->getFlags().hasNoUnsignedWrap() && |
| Flags.hasNoUnsignedWrap()) |
| NewFlags |= SDNodeFlags::NoUnsignedWrap; |
| |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N1)) { |
| // Reassociate: (op (op x, c1), c2) -> (op x, (op c1, c2)) |
| if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, {N01, N1})) { |
| NewFlags.setDisjoint(Flags.hasDisjoint() && |
| N0->getFlags().hasDisjoint()); |
| return DAG.getNode(Opc, DL, VT, N00, OpNode, NewFlags); |
| } |
| return SDValue(); |
| } |
| if (TLI.isReassocProfitable(DAG, N0, N1)) { |
| // Reassociate: (op (op x, c1), y) -> (op (op x, y), c1) |
| // iff (op x, c1) has one use |
| SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N00, N1, NewFlags); |
| return DAG.getNode(Opc, DL, VT, OpNode, N01, NewFlags); |
| } |
| } |
| |
| // Check for repeated operand logic simplifications. |
| if (Opc == ISD::AND || Opc == ISD::OR) { |
| // (N00 & N01) & N00 --> N00 & N01 |
| // (N00 & N01) & N01 --> N00 & N01 |
| // (N00 | N01) | N00 --> N00 | N01 |
| // (N00 | N01) | N01 --> N00 | N01 |
| if (N1 == N00 || N1 == N01) |
| return N0; |
| } |
| if (Opc == ISD::XOR) { |
| // (N00 ^ N01) ^ N00 --> N01 |
| if (N1 == N00) |
| return N01; |
| // (N00 ^ N01) ^ N01 --> N00 |
| if (N1 == N01) |
| return N00; |
| } |
| |
| if (TLI.isReassocProfitable(DAG, N0, N1)) { |
| if (N1 != N01) { |
      // Reassociate if (op N00, N1) already exists.
| if (SDNode *NE = DAG.getNodeIfExists(Opc, DAG.getVTList(VT), {N00, N1})) { |
        // If (Op (Op N00, N1), N01) already exists, stop reassociating
        // to avoid an infinite loop.
| if (!DAG.doesNodeExist(Opc, DAG.getVTList(VT), {SDValue(NE, 0), N01})) |
| return DAG.getNode(Opc, DL, VT, SDValue(NE, 0), N01); |
| } |
| } |
| |
| if (N1 != N00) { |
      // Reassociate if (op N01, N1) already exists.
| if (SDNode *NE = DAG.getNodeIfExists(Opc, DAG.getVTList(VT), {N01, N1})) { |
        // If (Op (Op N01, N1), N00) already exists, stop reassociating
        // to avoid an infinite loop.
| if (!DAG.doesNodeExist(Opc, DAG.getVTList(VT), {SDValue(NE, 0), N00})) |
| return DAG.getNode(Opc, DL, VT, SDValue(NE, 0), N00); |
| } |
| } |
| |
    // Reassociate the operands from (OR/AND (OR/AND(N00, N01)), N1) to (OR/AND
    // (OR/AND(N00, N1)), N01) when N00 and N1 are comparisons with the same
    // predicate, or to (OR/AND (OR/AND(N1, N01)), N00) when N01 and N1 are
    // comparisons with the same predicate. This enables optimizations such as
    // the following:
| // CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C) |
| // CMP(A,C)&&CMP(B,C) => CMP(MIN/MAX(A,B), C) |
| if (Opc == ISD::AND || Opc == ISD::OR) { |
| if (N1->getOpcode() == ISD::SETCC && N00->getOpcode() == ISD::SETCC && |
| N01->getOpcode() == ISD::SETCC) { |
| ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get(); |
| ISD::CondCode CC00 = cast<CondCodeSDNode>(N00.getOperand(2))->get(); |
| ISD::CondCode CC01 = cast<CondCodeSDNode>(N01.getOperand(2))->get(); |
| if (CC1 == CC00 && CC1 != CC01) { |
| SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N00, N1, Flags); |
| return DAG.getNode(Opc, DL, VT, OpNode, N01, Flags); |
| } |
| if (CC1 == CC01 && CC1 != CC00) { |
| SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N01, N1, Flags); |
| return DAG.getNode(Opc, DL, VT, OpNode, N00, Flags); |
| } |
| } |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| /// Try to reassociate commutative (Opc N0, N1) if either \p N0 or \p N1 is the |
| /// same kind of operation as \p Opc. |
| SDValue DAGCombiner::reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0, |
| SDValue N1, SDNodeFlags Flags) { |
| assert(TLI.isCommutativeBinOp(Opc) && "Operation not commutative."); |
| |
| // Floating-point reassociation is not allowed without loose FP math. |
| if (N0.getValueType().isFloatingPoint() || |
| N1.getValueType().isFloatingPoint()) |
| if (!Flags.hasAllowReassociation() || !Flags.hasNoSignedZeros()) |
| return SDValue(); |
| |
| if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N0, N1, Flags)) |
| return Combined; |
| if (SDValue Combined = reassociateOpsCommutative(Opc, DL, N1, N0, Flags)) |
| return Combined; |
| return SDValue(); |
| } |
| |
| // Try to fold Opc(vecreduce(x), vecreduce(y)) -> vecreduce(Opc(x, y)) |
| // Note that we only expect Flags to be passed from FP operations. For integer |
| // operations they need to be dropped. |
| SDValue DAGCombiner::reassociateReduction(unsigned RedOpc, unsigned Opc, |
| const SDLoc &DL, EVT VT, SDValue N0, |
| SDValue N1, SDNodeFlags Flags) { |
| if (N0.getOpcode() == RedOpc && N1.getOpcode() == RedOpc && |
| N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() && |
| N0->hasOneUse() && N1->hasOneUse() && |
| TLI.isOperationLegalOrCustom(Opc, N0.getOperand(0).getValueType()) && |
| TLI.shouldReassociateReduction(RedOpc, N0.getOperand(0).getValueType())) { |
| SelectionDAG::FlagInserter FlagsInserter(DAG, Flags); |
| return DAG.getNode(RedOpc, DL, VT, |
| DAG.getNode(Opc, DL, N0.getOperand(0).getValueType(), |
| N0.getOperand(0), N1.getOperand(0))); |
| } |
| |
| // Reassociate op(op(vecreduce(a), b), op(vecreduce(c), d)) into |
| // op(vecreduce(op(a, c)), op(b, d)), to combine the reductions into a |
| // single node. |
| SDValue A, B, C, D, RedA, RedB; |
| if (sd_match(N0, m_OneUse(m_c_BinOp( |
| Opc, |
| m_AllOf(m_OneUse(m_UnaryOp(RedOpc, m_Value(A))), |
| m_Value(RedA)), |
| m_Value(B)))) && |
| sd_match(N1, m_OneUse(m_c_BinOp( |
| Opc, |
| m_AllOf(m_OneUse(m_UnaryOp(RedOpc, m_Value(C))), |
| m_Value(RedB)), |
| m_Value(D)))) && |
| !sd_match(B, m_UnaryOp(RedOpc, m_Value())) && |
| !sd_match(D, m_UnaryOp(RedOpc, m_Value())) && |
| A.getValueType() == C.getValueType() && |
| hasOperation(Opc, A.getValueType()) && |
| TLI.shouldReassociateReduction(RedOpc, VT)) { |
| if ((Opc == ISD::FADD || Opc == ISD::FMUL) && |
| (!N0->getFlags().hasAllowReassociation() || |
| !N1->getFlags().hasAllowReassociation() || |
| !RedA->getFlags().hasAllowReassociation() || |
| !RedB->getFlags().hasAllowReassociation())) |
| return SDValue(); |
| SelectionDAG::FlagInserter FlagsInserter( |
| DAG, Flags & N0->getFlags() & N1->getFlags() & RedA->getFlags() & |
| RedB->getFlags()); |
| SDValue Op = DAG.getNode(Opc, DL, A.getValueType(), A, C); |
| SDValue Red = DAG.getNode(RedOpc, DL, VT, Op); |
| SDValue Op2 = DAG.getNode(Opc, DL, VT, B, D); |
| return DAG.getNode(Opc, DL, VT, Red, Op2); |
| } |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo, |
| bool AddTo) { |
| assert(N->getNumValues() == NumTo && "Broken CombineTo call!"); |
| ++NodesCombined; |
| LLVM_DEBUG(dbgs() << "\nReplacing.1 "; N->dump(&DAG); dbgs() << "\nWith: "; |
| To[0].dump(&DAG); |
| dbgs() << " and " << NumTo - 1 << " other values\n"); |
| for (unsigned i = 0, e = NumTo; i != e; ++i) |
| assert((!To[i].getNode() || |
| N->getValueType(i) == To[i].getValueType()) && |
| "Cannot combine value to value of different type!"); |
| |
| WorklistRemover DeadNodes(*this); |
| DAG.ReplaceAllUsesWith(N, To); |
| if (AddTo) { |
| // Push the new nodes and any users onto the worklist |
| for (unsigned i = 0, e = NumTo; i != e; ++i) { |
| if (To[i].getNode()) |
| AddToWorklistWithUsers(To[i].getNode()); |
| } |
| } |
| |
| // Finally, if the node is now dead, remove it from the graph. The node |
| // may not be dead if the replacement process recursively simplified to |
| // something else needing this node. |
| if (N->use_empty()) |
| deleteAndRecombine(N); |
| return SDValue(N, 0); |
| } |
| |
| void DAGCombiner:: |
| CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) { |
| // Replace the old value with the new one. |
| ++NodesCombined; |
| LLVM_DEBUG(dbgs() << "\nReplacing.2 "; TLO.Old.dump(&DAG); |
| dbgs() << "\nWith: "; TLO.New.dump(&DAG); dbgs() << '\n'); |
| |
| // Replace all uses. |
| DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New); |
| |
| // Push the new node and any (possibly new) users onto the worklist. |
| AddToWorklistWithUsers(TLO.New.getNode()); |
| |
| // Finally, if the node is now dead, remove it from the graph. |
| recursivelyDeleteUnusedNodes(TLO.Old.getNode()); |
| } |
| |
| /// Check the specified integer node value to see if it can be simplified or if |
| /// things it uses can be simplified by bit propagation. If so, return true. |
| bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, |
| const APInt &DemandedElts, |
| bool AssumeSingleUse) { |
| TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations); |
| KnownBits Known; |
| if (!TLI.SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, 0, |
| AssumeSingleUse)) |
| return false; |
| |
| // Revisit the node. |
| AddToWorklist(Op.getNode()); |
| |
| CommitTargetLoweringOpt(TLO); |
| return true; |
| } |
| |
| /// Check the specified vector node value to see if it can be simplified or |
| /// if things it uses can be simplified as it only uses some of the elements. |
| /// If so, return true. |
| bool DAGCombiner::SimplifyDemandedVectorElts(SDValue Op, |
| const APInt &DemandedElts, |
| bool AssumeSingleUse) { |
| TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations); |
| APInt KnownUndef, KnownZero; |
| if (!TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, |
| TLO, 0, AssumeSingleUse)) |
| return false; |
| |
| // Revisit the node. |
| AddToWorklist(Op.getNode()); |
| |
| CommitTargetLoweringOpt(TLO); |
| return true; |
| } |
| |
| void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) { |
| SDLoc DL(Load); |
| EVT VT = Load->getValueType(0); |
| SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0)); |
| |
| LLVM_DEBUG(dbgs() << "\nReplacing.9 "; Load->dump(&DAG); dbgs() << "\nWith: "; |
| Trunc.dump(&DAG); dbgs() << '\n'); |
| |
| DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc); |
| DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1)); |
| |
| AddToWorklist(Trunc.getNode()); |
| recursivelyDeleteUnusedNodes(Load); |
| } |
| |
| SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) { |
| Replace = false; |
| SDLoc DL(Op); |
| if (ISD::isUNINDEXEDLoad(Op.getNode())) { |
| LoadSDNode *LD = cast<LoadSDNode>(Op); |
| EVT MemVT = LD->getMemoryVT(); |
| ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD |
| : LD->getExtensionType(); |
| Replace = true; |
| return DAG.getExtLoad(ExtType, DL, PVT, |
| LD->getChain(), LD->getBasePtr(), |
| MemVT, LD->getMemOperand()); |
| } |
| |
| unsigned Opc = Op.getOpcode(); |
| switch (Opc) { |
| default: break; |
| case ISD::AssertSext: |
| if (SDValue Op0 = SExtPromoteOperand(Op.getOperand(0), PVT)) |
| return DAG.getNode(ISD::AssertSext, DL, PVT, Op0, Op.getOperand(1)); |
| break; |
| case ISD::AssertZext: |
| if (SDValue Op0 = ZExtPromoteOperand(Op.getOperand(0), PVT)) |
| return DAG.getNode(ISD::AssertZext, DL, PVT, Op0, Op.getOperand(1)); |
| break; |
| case ISD::Constant: { |
| unsigned ExtOpc = |
| Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; |
| return DAG.getNode(ExtOpc, DL, PVT, Op); |
| } |
| } |
| |
| if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT)) |
| return SDValue(); |
| return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op); |
| } |
| |
| SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) { |
| if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT)) |
| return SDValue(); |
| EVT OldVT = Op.getValueType(); |
| SDLoc DL(Op); |
| bool Replace = false; |
| SDValue NewOp = PromoteOperand(Op, PVT, Replace); |
| if (!NewOp.getNode()) |
| return SDValue(); |
| AddToWorklist(NewOp.getNode()); |
| |
| if (Replace) |
| ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode()); |
| return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp, |
| DAG.getValueType(OldVT)); |
| } |
| |
| SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) { |
| EVT OldVT = Op.getValueType(); |
| SDLoc DL(Op); |
| bool Replace = false; |
| SDValue NewOp = PromoteOperand(Op, PVT, Replace); |
| if (!NewOp.getNode()) |
| return SDValue(); |
| AddToWorklist(NewOp.getNode()); |
| |
| if (Replace) |
| ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode()); |
| return DAG.getZeroExtendInReg(NewOp, DL, OldVT); |
| } |
| |
| /// Promote the specified integer binary operation if the target indicates it |
| /// is beneficial. E.g., on x86 it's usually better to promote i16 operations |
| /// to i32, since i16 instructions are longer. |
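| /// For example, on such a target this rewrites (i16 add x, y) as |
| /// (i16 trunc (i32 add (i32 anyext x), (i32 anyext y))), promoting loaded |
| /// operands to extending loads where possible. |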
| SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) { |
| if (!LegalOperations) |
| return SDValue(); |
| |
| EVT VT = Op.getValueType(); |
| if (VT.isVector() || !VT.isInteger()) |
| return SDValue(); |
| |
| // If operation type is 'undesirable', e.g. i16 on x86, consider |
| // promoting it. |
| unsigned Opc = Op.getOpcode(); |
| if (TLI.isTypeDesirableForOp(Opc, VT)) |
| return SDValue(); |
| |
| EVT PVT = VT; |
| // Ask the target whether it is a good idea to promote this operation and, |
| // if so, what type to promote it to. |
| if (TLI.IsDesirableToPromoteOp(Op, PVT)) { |
| assert(PVT != VT && "Don't know what type to promote to!"); |
| |
| LLVM_DEBUG(dbgs() << "\nPromoting "; Op.dump(&DAG)); |
| |
| bool Replace0 = false; |
| SDValue N0 = Op.getOperand(0); |
| SDValue NN0 = PromoteOperand(N0, PVT, Replace0); |
| |
| bool Replace1 = false; |
| SDValue N1 = Op.getOperand(1); |
| SDValue NN1 = PromoteOperand(N1, PVT, Replace1); |
| SDLoc DL(Op); |
| |
| SDValue RV = |
| DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, NN0, NN1)); |
| |
| // We are always replacing N0/N1's use in N and only need additional |
| // replacements if there are additional uses. |
| // Note: We are checking uses of the *nodes* (SDNode) rather than values |
| // (SDValue) here because the node may reference multiple values |
| // (for example, the chain value of a load node). |
| Replace0 &= !N0->hasOneUse(); |
| Replace1 &= (N0 != N1) && !N1->hasOneUse(); |
| |
| // Combine Op here so it is preserved past replacements. |
| CombineTo(Op.getNode(), RV); |
| |
| // If the operands have a use ordering, make sure we deal with the |
| // predecessor first. |
| if (Replace0 && Replace1 && N0->isPredecessorOf(N1.getNode())) { |
| std::swap(N0, N1); |
| std::swap(NN0, NN1); |
| } |
| |
| if (Replace0) { |
| AddToWorklist(NN0.getNode()); |
| ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode()); |
| } |
| if (Replace1) { |
| AddToWorklist(NN1.getNode()); |
| ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode()); |
| } |
| return Op; |
| } |
| return SDValue(); |
| } |
| |
| /// Promote the specified integer shift operation if the target indicates it |
| /// is beneficial. E.g., on x86 it's usually better to promote i16 operations |
| /// to i32, since i16 instructions are longer. |
| SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) { |
| if (!LegalOperations) |
| return SDValue(); |
| |
| EVT VT = Op.getValueType(); |
| if (VT.isVector() || !VT.isInteger()) |
| return SDValue(); |
| |
| // If operation type is 'undesirable', e.g. i16 on x86, consider |
| // promoting it. |
| unsigned Opc = Op.getOpcode(); |
| if (TLI.isTypeDesirableForOp(Opc, VT)) |
| return SDValue(); |
| |
| EVT PVT = VT; |
| // Ask the target whether it is a good idea to promote this operation and, |
| // if so, what type to promote it to. |
| if (TLI.IsDesirableToPromoteOp(Op, PVT)) { |
| assert(PVT != VT && "Don't know what type to promote to!"); |
| |
| LLVM_DEBUG(dbgs() << "\nPromoting "; Op.dump(&DAG)); |
| |
| bool Replace = false; |
| SDValue N0 = Op.getOperand(0); |
| if (Opc == ISD::SRA) |
| N0 = SExtPromoteOperand(N0, PVT); |
| else if (Opc == ISD::SRL) |
| N0 = ZExtPromoteOperand(N0, PVT); |
| else |
| N0 = PromoteOperand(N0, PVT, Replace); |
| |
| if (!N0.getNode()) |
| return SDValue(); |
| |
| SDLoc DL(Op); |
| SDValue N1 = Op.getOperand(1); |
| SDValue RV = |
| DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, N0, N1)); |
| |
| if (Replace) |
| ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode()); |
| |
| // Deal with Op being deleted. |
| if (Op && Op.getOpcode() != ISD::DELETED_NODE) |
| return RV; |
| } |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::PromoteExtend(SDValue Op) { |
| if (!LegalOperations) |
| return SDValue(); |
| |
| EVT VT = Op.getValueType(); |
| if (VT.isVector() || !VT.isInteger()) |
| return SDValue(); |
| |
| // If operation type is 'undesirable', e.g. i16 on x86, consider |
| // promoting it. |
| unsigned Opc = Op.getOpcode(); |
| if (TLI.isTypeDesirableForOp(Opc, VT)) |
| return SDValue(); |
| |
| EVT PVT = VT; |
| // Ask the target whether it is a good idea to promote this operation and, |
| // if so, what type to promote it to. |
| if (TLI.IsDesirableToPromoteOp(Op, PVT)) { |
| assert(PVT != VT && "Don't know what type to promote to!"); |
| // fold (aext (aext x)) -> (aext x) |
| // fold (aext (zext x)) -> (zext x) |
| // fold (aext (sext x)) -> (sext x) |
| LLVM_DEBUG(dbgs() << "\nPromoting "; Op.dump(&DAG)); |
| return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0)); |
| } |
| return SDValue(); |
| } |
| |
| bool DAGCombiner::PromoteLoad(SDValue Op) { |
| if (!LegalOperations) |
| return false; |
| |
| if (!ISD::isUNINDEXEDLoad(Op.getNode())) |
| return false; |
| |
| EVT VT = Op.getValueType(); |
| if (VT.isVector() || !VT.isInteger()) |
| return false; |
| |
| // If operation type is 'undesirable', e.g. i16 on x86, consider |
| // promoting it. |
| unsigned Opc = Op.getOpcode(); |
| if (TLI.isTypeDesirableForOp(Opc, VT)) |
| return false; |
| |
| EVT PVT = VT; |
| // Ask the target whether it is a good idea to promote this operation and, |
| // if so, what type to promote it to. |
| if (TLI.IsDesirableToPromoteOp(Op, PVT)) { |
| assert(PVT != VT && "Don't know what type to promote to!"); |
| |
| SDLoc DL(Op); |
| SDNode *N = Op.getNode(); |
| LoadSDNode *LD = cast<LoadSDNode>(N); |
| EVT MemVT = LD->getMemoryVT(); |
| ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? ISD::EXTLOAD |
| : LD->getExtensionType(); |
| SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT, |
| LD->getChain(), LD->getBasePtr(), |
| MemVT, LD->getMemOperand()); |
| SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD); |
| |
| LLVM_DEBUG(dbgs() << "\nPromoting "; N->dump(&DAG); dbgs() << "\nTo: "; |
| Result.dump(&DAG); dbgs() << '\n'); |
| |
| DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result); |
| DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1)); |
| |
| AddToWorklist(Result.getNode()); |
| recursivelyDeleteUnusedNodes(N); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /// Recursively delete a node which has no uses and any operands for |
| /// which it is the only use. |
| /// |
| /// Note that this both deletes the nodes and removes them from the worklist. |
| /// It also adds any nodes that have had a user deleted to the worklist, as |
| /// they may now have only one use and be subject to other combines. |
| bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) { |
| if (!N->use_empty()) |
| return false; |
| |
| SmallSetVector<SDNode *, 16> Nodes; |
| Nodes.insert(N); |
| do { |
| N = Nodes.pop_back_val(); |
| if (!N) |
| continue; |
| |
| if (N->use_empty()) { |
| for (const SDValue &ChildN : N->op_values()) |
| Nodes.insert(ChildN.getNode()); |
| |
| removeFromWorklist(N); |
| DAG.DeleteNode(N); |
| } else { |
| AddToWorklist(N); |
| } |
| } while (!Nodes.empty()); |
| return true; |
| } |
| |
| //===----------------------------------------------------------------------===// |
| // Main DAG Combiner implementation |
| //===----------------------------------------------------------------------===// |
| |
| void DAGCombiner::Run(CombineLevel AtLevel) { |
| // Set the instance variables so that the various visit routines may use them. |
| Level = AtLevel; |
| LegalDAG = Level >= AfterLegalizeDAG; |
| LegalOperations = Level >= AfterLegalizeVectorOps; |
| LegalTypes = Level >= AfterLegalizeTypes; |
| |
| WorklistInserter AddNodes(*this); |
| |
| // Add all the dag nodes to the worklist. |
| // |
| // Note: Not all nodes are added to the PruningList here; the only nodes |
| // which can be deleted are those which have no uses, and all other nodes |
| // which would otherwise be added to the worklist by the first call to |
| // getNextWorklistEntry are already present in it. |
| for (SDNode &Node : DAG.allnodes()) |
| AddToWorklist(&Node, /* IsCandidateForPruning */ Node.use_empty()); |
| |
| // Create a dummy node (which is not added to allnodes), that adds a reference |
| // to the root node, preventing it from being deleted, and tracking any |
| // changes of the root. |
| HandleSDNode Dummy(DAG.getRoot()); |
| |
| // While we have a valid worklist entry node, try to combine it. |
| while (SDNode *N = getNextWorklistEntry()) { |
| // If N has no uses, it is dead. Make sure to revisit all N's operands once |
| // N is deleted from the DAG, since they too may now be dead or may have a |
| // reduced number of uses, allowing other xforms. |
| if (recursivelyDeleteUnusedNodes(N)) |
| continue; |
| |
| WorklistRemover DeadNodes(*this); |
| |
| // If this combine is running after legalizing the DAG, re-legalize any |
| // nodes pulled off the worklist. |
| if (LegalDAG) { |
| SmallSetVector<SDNode *, 16> UpdatedNodes; |
| bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes); |
| |
| for (SDNode *LN : UpdatedNodes) |
| AddToWorklistWithUsers(LN); |
| |
| if (!NIsValid) |
| continue; |
| } |
| |
| LLVM_DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG)); |
| |
| // Add any operands of the new node which have not yet been combined to the |
| // worklist as well. getNextWorklistEntry flags nodes that have been |
| // combined before. Because the worklist uniques things already, this won't |
| // repeatedly process the same operand. |
| for (const SDValue &ChildN : N->op_values()) |
| AddToWorklist(ChildN.getNode(), /*IsCandidateForPruning=*/true, |
| /*SkipIfCombinedBefore=*/true); |
| |
| SDValue RV = combine(N); |
| |
| if (!RV.getNode()) |
| continue; |
| |
| ++NodesCombined; |
| |
| // Invalidate cached info. |
| ChainsWithoutMergeableStores.clear(); |
| |
| // If we get back the same node we passed in, rather than a new node or |
| // zero, we know that the node must have defined multiple values and |
| // CombineTo was used. Since CombineTo takes care of the worklist |
| // mechanics for us, we have no work to do in this case. |
| if (RV.getNode() == N) |
| continue; |
| |
| assert(N->getOpcode() != ISD::DELETED_NODE && |
| RV.getOpcode() != ISD::DELETED_NODE && |
| "Node was deleted but visit returned new node!"); |
| |
| LLVM_DEBUG(dbgs() << " ... into: "; RV.dump(&DAG)); |
| |
| if (N->getNumValues() == RV->getNumValues()) |
| DAG.ReplaceAllUsesWith(N, RV.getNode()); |
| else { |
| assert(N->getValueType(0) == RV.getValueType() && |
| N->getNumValues() == 1 && "Type mismatch"); |
| DAG.ReplaceAllUsesWith(N, &RV); |
| } |
| |
| // Push the new node and any users onto the worklist. Omit this if the |
| // new node is the EntryToken (e.g. if a store managed to get optimized |
| // out), because re-visiting the EntryToken and its users will not uncover |
| // any additional opportunities, but there may be a large number of such |
| // users, potentially causing compile time explosion. |
| if (RV.getOpcode() != ISD::EntryToken) |
| AddToWorklistWithUsers(RV.getNode()); |
| |
| // Finally, if the node is now dead, remove it from the graph. The node |
| // may not be dead if the replacement process recursively simplified to |
| // something else needing this node. This will also take care of adding any |
| // operands which have lost a user to the worklist. |
| recursivelyDeleteUnusedNodes(N); |
| } |
| |
| // If the root changed (e.g. it was a dead load), update the root. |
| DAG.setRoot(Dummy.getValue()); |
| DAG.RemoveDeadNodes(); |
| } |
| |
| SDValue DAGCombiner::visit(SDNode *N) { |
| // clang-format off |
| switch (N->getOpcode()) { |
| default: break; |
| case ISD::TokenFactor: return visitTokenFactor(N); |
| case ISD::MERGE_VALUES: return visitMERGE_VALUES(N); |
| case ISD::ADD: return visitADD(N); |
| case ISD::SUB: return visitSUB(N); |
| case ISD::SADDSAT: |
| case ISD::UADDSAT: return visitADDSAT(N); |
| case ISD::SSUBSAT: |
| case ISD::USUBSAT: return visitSUBSAT(N); |
| case ISD::ADDC: return visitADDC(N); |
| case ISD::SADDO: |
| case ISD::UADDO: return visitADDO(N); |
| case ISD::SUBC: return visitSUBC(N); |
| case ISD::SSUBO: |
| case ISD::USUBO: return visitSUBO(N); |
| case ISD::ADDE: return visitADDE(N); |
| case ISD::UADDO_CARRY: return visitUADDO_CARRY(N); |
| case ISD::SADDO_CARRY: return visitSADDO_CARRY(N); |
| case ISD::SUBE: return visitSUBE(N); |
| case ISD::USUBO_CARRY: return visitUSUBO_CARRY(N); |
| case ISD::SSUBO_CARRY: return visitSSUBO_CARRY(N); |
| case ISD::SMULFIX: |
| case ISD::SMULFIXSAT: |
| case ISD::UMULFIX: |
| case ISD::UMULFIXSAT: return visitMULFIX(N); |
| case ISD::MUL: return visitMUL<EmptyMatchContext>(N); |
| case ISD::SDIV: return visitSDIV(N); |
| case ISD::UDIV: return visitUDIV(N); |
| case ISD::SREM: |
| case ISD::UREM: return visitREM(N); |
| case ISD::MULHU: return visitMULHU(N); |
| case ISD::MULHS: return visitMULHS(N); |
| case ISD::AVGFLOORS: |
| case ISD::AVGFLOORU: |
| case ISD::AVGCEILS: |
| case ISD::AVGCEILU: return visitAVG(N); |
| case ISD::ABDS: |
| case ISD::ABDU: return visitABD(N); |
| case ISD::SMUL_LOHI: return visitSMUL_LOHI(N); |
| case ISD::UMUL_LOHI: return visitUMUL_LOHI(N); |
| case ISD::SMULO: |
| case ISD::UMULO: return visitMULO(N); |
| case ISD::SMIN: |
| case ISD::SMAX: |
| case ISD::UMIN: |
| case ISD::UMAX: return visitIMINMAX(N); |
| case ISD::AND: return visitAND(N); |
| case ISD::OR: return visitOR(N); |
| case ISD::XOR: return visitXOR(N); |
| case ISD::SHL: return visitSHL(N); |
| case ISD::SRA: return visitSRA(N); |
| case ISD::SRL: return visitSRL(N); |
| case ISD::ROTR: |
| case ISD::ROTL: return visitRotate(N); |
| case ISD::FSHL: |
| case ISD::FSHR: return visitFunnelShift(N); |
| case ISD::SSHLSAT: |
| case ISD::USHLSAT: return visitSHLSAT(N); |
| case ISD::ABS: return visitABS(N); |
| case ISD::BSWAP: return visitBSWAP(N); |
| case ISD::BITREVERSE: return visitBITREVERSE(N); |
| case ISD::CTLZ: return visitCTLZ(N); |
| case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N); |
| case ISD::CTTZ: return visitCTTZ(N); |
| case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N); |
| case ISD::CTPOP: return visitCTPOP(N); |
| case ISD::SELECT: return visitSELECT(N); |
| case ISD::VSELECT: return visitVSELECT(N); |
| case ISD::SELECT_CC: return visitSELECT_CC(N); |
| case ISD::SETCC: return visitSETCC(N); |
| case ISD::SETCCCARRY: return visitSETCCCARRY(N); |
| case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N); |
| case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N); |
| case ISD::ANY_EXTEND: return visitANY_EXTEND(N); |
| case ISD::AssertSext: |
| case ISD::AssertZext: return visitAssertExt(N); |
| case ISD::AssertAlign: return visitAssertAlign(N); |
| case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N); |
| case ISD::SIGN_EXTEND_VECTOR_INREG: |
| case ISD::ZERO_EXTEND_VECTOR_INREG: |
| case ISD::ANY_EXTEND_VECTOR_INREG: return visitEXTEND_VECTOR_INREG(N); |
| case ISD::TRUNCATE: return visitTRUNCATE(N); |
| case ISD::TRUNCATE_USAT_U: return visitTRUNCATE_USAT_U(N); |
| case ISD::BITCAST: return visitBITCAST(N); |
| case ISD::BUILD_PAIR: return visitBUILD_PAIR(N); |
| case ISD::FADD: return visitFADD(N); |
| case ISD::STRICT_FADD: return visitSTRICT_FADD(N); |
| case ISD::FSUB: return visitFSUB(N); |
| case ISD::FMUL: return visitFMUL(N); |
| case ISD::FMA: return visitFMA<EmptyMatchContext>(N); |
| case ISD::FMAD: return visitFMAD(N); |
| case ISD::FDIV: return visitFDIV(N); |
| case ISD::FREM: return visitFREM(N); |
| case ISD::FSQRT: return visitFSQRT(N); |
| case ISD::FCOPYSIGN: return visitFCOPYSIGN(N); |
| case ISD::FPOW: return visitFPOW(N); |
| case ISD::SINT_TO_FP: return visitSINT_TO_FP(N); |
| case ISD::UINT_TO_FP: return visitUINT_TO_FP(N); |
| case ISD::FP_TO_SINT: return visitFP_TO_SINT(N); |
| case ISD::FP_TO_UINT: return visitFP_TO_UINT(N); |
| case ISD::LROUND: |
| case ISD::LLROUND: |
| case ISD::LRINT: |
| case ISD::LLRINT: return visitXROUND(N); |
| case ISD::FP_ROUND: return visitFP_ROUND(N); |
| case ISD::FP_EXTEND: return visitFP_EXTEND(N); |
| case ISD::FNEG: return visitFNEG(N); |
| case ISD::FABS: return visitFABS(N); |
| case ISD::FFLOOR: return visitFFLOOR(N); |
| case ISD::FMINNUM: |
| case ISD::FMAXNUM: |
| case ISD::FMINIMUM: |
| case ISD::FMAXIMUM: |
| case ISD::FMINIMUMNUM: |
| case ISD::FMAXIMUMNUM: return visitFMinMax(N); |
| case ISD::FCEIL: return visitFCEIL(N); |
| case ISD::FTRUNC: return visitFTRUNC(N); |
| case ISD::FFREXP: return visitFFREXP(N); |
| case ISD::BRCOND: return visitBRCOND(N); |
| case ISD::BR_CC: return visitBR_CC(N); |
| case ISD::LOAD: return visitLOAD(N); |
| case ISD::STORE: return visitSTORE(N); |
| case ISD::ATOMIC_STORE: return visitATOMIC_STORE(N); |
| case ISD::INSERT_VECTOR_ELT: return visitINSERT_VECTOR_ELT(N); |
| case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N); |
| case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N); |
| case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N); |
| case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N); |
| case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N); |
| case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N); |
| case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N); |
| case ISD::MGATHER: return visitMGATHER(N); |
| case ISD::MLOAD: return visitMLOAD(N); |
| case ISD::MSCATTER: return visitMSCATTER(N); |
| case ISD::MSTORE: return visitMSTORE(N); |
| case ISD::EXPERIMENTAL_VECTOR_HISTOGRAM: return visitMHISTOGRAM(N); |
| case ISD::PARTIAL_REDUCE_SMLA: |
| case ISD::PARTIAL_REDUCE_UMLA: |
| case ISD::PARTIAL_REDUCE_SUMLA: |
| return visitPARTIAL_REDUCE_MLA(N); |
| case ISD::VECTOR_COMPRESS: return visitVECTOR_COMPRESS(N); |
| case ISD::LIFETIME_END: return visitLIFETIME_END(N); |
| case ISD::FP_TO_FP16: return visitFP_TO_FP16(N); |
| case ISD::FP16_TO_FP: return visitFP16_TO_FP(N); |
| case ISD::FP_TO_BF16: return visitFP_TO_BF16(N); |
| case ISD::BF16_TO_FP: return visitBF16_TO_FP(N); |
| case ISD::FREEZE: return visitFREEZE(N); |
| case ISD::GET_FPENV_MEM: return visitGET_FPENV_MEM(N); |
| case ISD::SET_FPENV_MEM: return visitSET_FPENV_MEM(N); |
| case ISD::FCANONICALIZE: return visitFCANONICALIZE(N); |
| case ISD::VECREDUCE_FADD: |
| case ISD::VECREDUCE_FMUL: |
| case ISD::VECREDUCE_ADD: |
| case ISD::VECREDUCE_MUL: |
| case ISD::VECREDUCE_AND: |
| case ISD::VECREDUCE_OR: |
| case ISD::VECREDUCE_XOR: |
| case ISD::VECREDUCE_SMAX: |
| case ISD::VECREDUCE_SMIN: |
| case ISD::VECREDUCE_UMAX: |
| case ISD::VECREDUCE_UMIN: |
| case ISD::VECREDUCE_FMAX: |
| case ISD::VECREDUCE_FMIN: |
| case ISD::VECREDUCE_FMAXIMUM: |
| case ISD::VECREDUCE_FMINIMUM: return visitVECREDUCE(N); |
| #define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) case ISD::SDOPC: |
| #include "llvm/IR/VPIntrinsics.def" |
| return visitVPOp(N); |
| } |
| // clang-format on |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::combine(SDNode *N) { |
| if (!DebugCounter::shouldExecute(DAGCombineCounter)) |
| return SDValue(); |
| |
| SDValue RV; |
| if (!DisableGenericCombines) |
| RV = visit(N); |
| |
| // If nothing happened, try a target-specific DAG combine. |
| if (!RV.getNode()) { |
| assert(N->getOpcode() != ISD::DELETED_NODE && |
| "Node was deleted but visit returned NULL!"); |
| |
| if (N->getOpcode() >= ISD::BUILTIN_OP_END || |
| TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) { |
| |
| // Expose the DAG combiner to the target combiner impls. |
| TargetLowering::DAGCombinerInfo |
| DagCombineInfo(DAG, Level, false, this); |
| |
| RV = TLI.PerformDAGCombine(N, DagCombineInfo); |
| } |
| } |
| |
| // If still nothing happened, try promoting the operation. |
| if (!RV.getNode()) { |
| switch (N->getOpcode()) { |
| default: break; |
| case ISD::ADD: |
| case ISD::SUB: |
| case ISD::MUL: |
| case ISD::AND: |
| case ISD::OR: |
| case ISD::XOR: |
| RV = PromoteIntBinOp(SDValue(N, 0)); |
| break; |
| case ISD::SHL: |
| case ISD::SRA: |
| case ISD::SRL: |
| RV = PromoteIntShiftOp(SDValue(N, 0)); |
| break; |
| case ISD::SIGN_EXTEND: |
| case ISD::ZERO_EXTEND: |
| case ISD::ANY_EXTEND: |
| RV = PromoteExtend(SDValue(N, 0)); |
| break; |
| case ISD::LOAD: |
| if (PromoteLoad(SDValue(N, 0))) |
| RV = SDValue(N, 0); |
| break; |
| } |
| } |
| |
| // If N is a commutative binary node, try to eliminate it if the commuted |
| // version is already present in the DAG. |
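| // For example, if this node is (xor x, y) and the DAG already contains |
| // (xor y, x), all uses of this node can be redirected to the existing node. |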
| if (!RV.getNode() && TLI.isCommutativeBinOp(N->getOpcode())) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| |
| // Constant operands are canonicalized to RHS. |
| if (N0 != N1 && (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1))) { |
| SDValue Ops[] = {N1, N0}; |
| SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops, |
| N->getFlags()); |
| if (CSENode) |
| return SDValue(CSENode, 0); |
| } |
| } |
| |
| return RV; |
| } |
| |
| /// Given a node, return its input chain if it has one, otherwise return a null |
| /// SDValue. |
| static SDValue getInputChainForNode(SDNode *N) { |
| if (unsigned NumOps = N->getNumOperands()) { |
| if (N->getOperand(0).getValueType() == MVT::Other) |
| return N->getOperand(0); |
| if (N->getOperand(NumOps-1).getValueType() == MVT::Other) |
| return N->getOperand(NumOps-1); |
| for (unsigned i = 1; i < NumOps-1; ++i) |
| if (N->getOperand(i).getValueType() == MVT::Other) |
| return N->getOperand(i); |
| } |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitFCANONICALIZE(SDNode *N) { |
| SDValue Operand = N->getOperand(0); |
| EVT VT = Operand.getValueType(); |
| SDLoc dl(N); |
| |
| // Canonicalize undef to quiet NaN. |
| if (Operand.isUndef()) { |
| APFloat CanonicalQNaN = APFloat::getQNaN(VT.getFltSemantics()); |
| return DAG.getConstantFP(CanonicalQNaN, dl, VT); |
| } |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitTokenFactor(SDNode *N) { |
| // If N has two operands, where one has an input chain equal to the other, |
| // the 'other' chain is redundant. |
| if (N->getNumOperands() == 2) { |
| if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1)) |
| return N->getOperand(0); |
| if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0)) |
| return N->getOperand(1); |
| } |
| |
| // Don't simplify token factors if optnone. |
| if (OptLevel == CodeGenOptLevel::None) |
| return SDValue(); |
| |
| // Don't simplify the token factor if the node itself has too many operands. |
| if (N->getNumOperands() > TokenFactorInlineLimit) |
| return SDValue(); |
| |
| // If the sole user is a token factor, we should make sure we have a |
| // chance to merge them together. This prevents TF chains from inhibiting |
| // optimizations. |
| if (N->hasOneUse() && N->user_begin()->getOpcode() == ISD::TokenFactor) |
| AddToWorklist(*(N->user_begin())); |
| |
| SmallVector<SDNode *, 8> TFs; // List of token factors to visit. |
| SmallVector<SDValue, 8> Ops; // Ops for replacing token factor. |
| SmallPtrSet<SDNode*, 16> SeenOps; |
| bool Changed = false; // If we should replace this token factor. |
| |
| // Start out with this token factor. |
| TFs.push_back(N); |
| |
| // Iterate through token factors. The TFs list grows when new token factors |
| // are encountered. |
| for (unsigned i = 0; i < TFs.size(); ++i) { |
| // Limit number of nodes to inline, to avoid quadratic compile times. |
| // We have to add the outstanding Token Factors to Ops, otherwise we might |
| // drop Ops from the resulting Token Factors. |
| if (Ops.size() > TokenFactorInlineLimit) { |
| for (unsigned j = i; j < TFs.size(); j++) |
| Ops.emplace_back(TFs[j], 0); |
| // Drop unprocessed Token Factors from TFs, so we do not add them to the |
| // combiner worklist later. |
| TFs.resize(i); |
| break; |
| } |
| |
| SDNode *TF = TFs[i]; |
| // Check each of the operands. |
| for (const SDValue &Op : TF->op_values()) { |
| switch (Op.getOpcode()) { |
| case ISD::EntryToken: |
| // Entry tokens don't need to be added to the list. They are |
| // redundant. |
| Changed = true; |
| break; |
| |
| case ISD::TokenFactor: |
| if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) { |
| // Queue up for processing. |
| TFs.push_back(Op.getNode()); |
| Changed = true; |
| break; |
| } |
| [[fallthrough]]; |
| |
| default: |
| // Only add if it isn't already in the list. |
| if (SeenOps.insert(Op.getNode()).second) |
| Ops.push_back(Op); |
| else |
| Changed = true; |
| break; |
| } |
| } |
| } |
| |
| // Re-visit inlined Token Factors, to clean them up in case they have been |
| // removed. Skip the first Token Factor, as this is the current node. |
| for (unsigned i = 1, e = TFs.size(); i < e; i++) |
| AddToWorklist(TFs[i]); |
| |
| // Remove nodes that are chained to another node in the list. Do so by |
| // walking up chains breadth-first, stopping when we've seen another |
| // operand. In general we must climb to the EntryNode, but we can exit early |
| // if we find that all remaining work is associated with just one operand, |
| // as no further pruning is possible. |
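| // For example, if Ops contains both a store's chain result and the chain |
| // that store reads, the latter is redundant: the store already orders |
| // after it, so it can be pruned from the token factor. |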
| |
| // List of nodes to search through and original Ops from which they originate. |
| SmallVector<std::pair<SDNode *, unsigned>, 8> Worklist; |
| SmallVector<unsigned, 8> OpWorkCount; // Count of work for each Op. |
| SmallPtrSet<SDNode *, 16> SeenChains; |
| bool DidPruneOps = false; |
| |
| unsigned NumLeftToConsider = 0; |
| for (const SDValue &Op : Ops) { |
| Worklist.push_back(std::make_pair(Op.getNode(), NumLeftToConsider++)); |
| OpWorkCount.push_back(1); |
| } |
| |
| auto AddToWorklist = [&](unsigned CurIdx, SDNode *Op, unsigned OpNumber) { |
| // If this is an Op, we can remove the op from the list. Re-mark any |
| // search associated with it as coming from the current OpNumber. |
| if (SeenOps.contains(Op)) { |
| Changed = true; |
| DidPruneOps = true; |
| unsigned OrigOpNumber = 0; |
| while (OrigOpNumber < Ops.size() && Ops[OrigOpNumber].getNode() != Op) |
| OrigOpNumber++; |
| assert((OrigOpNumber != Ops.size()) && |
| "expected to find TokenFactor Operand"); |
| // Re-mark worklist from OrigOpNumber to OpNumber |
| for (unsigned i = CurIdx + 1; i < Worklist.size(); ++i) { |
| if (Worklist[i].second == OrigOpNumber) { |
| Worklist[i].second = OpNumber; |
| } |
| } |
| OpWorkCount[OpNumber] += OpWorkCount[OrigOpNumber]; |
| OpWorkCount[OrigOpNumber] = 0; |
| NumLeftToConsider--; |
| } |
| // Add if it's a new chain |
| if (SeenChains.insert(Op).second) { |
| OpWorkCount[OpNumber]++; |
| Worklist.push_back(std::make_pair(Op, OpNumber)); |
| } |
| }; |
| |
| for (unsigned i = 0; i < Worklist.size() && i < 1024; ++i) { |
| // We need to consider at least 2 Ops to be able to prune. |
| if (NumLeftToConsider <= 1) |
| break; |
| auto CurNode = Worklist[i].first; |
| auto CurOpNumber = Worklist[i].second; |
| assert((OpWorkCount[CurOpNumber] > 0) && |
| "Node should not appear in worklist"); |
| switch (CurNode->getOpcode()) { |
| case ISD::EntryToken: |
| // Hitting EntryToken is the only way for the search to terminate without |
| // hitting another operand's search. Prevent us from marking this operand |
| // considered. |
| NumLeftToConsider++; |
| break; |
| case ISD::TokenFactor: |
| for (const SDValue &Op : CurNode->op_values()) |
| AddToWorklist(i, Op.getNode(), CurOpNumber); |
| break; |
| case ISD::LIFETIME_START: |
| case ISD::LIFETIME_END: |
| case ISD::CopyFromReg: |
| case ISD::CopyToReg: |
| AddToWorklist(i, CurNode->getOperand(0).getNode(), CurOpNumber); |
| break; |
| default: |
| if (auto *MemNode = dyn_cast<MemSDNode>(CurNode)) |
| AddToWorklist(i, MemNode->getChain().getNode(), CurOpNumber); |
| break; |
| } |
| OpWorkCount[CurOpNumber]--; |
| if (OpWorkCount[CurOpNumber] == 0) |
| NumLeftToConsider--; |
| } |
| |
| // If we've changed things around, replace the token factor. |
| if (Changed) { |
| SDValue Result; |
| if (Ops.empty()) { |
| // The entry token is the only possible outcome. |
| Result = DAG.getEntryNode(); |
| } else { |
| if (DidPruneOps) { |
| SmallVector<SDValue, 8> PrunedOps; |
| // An op that appears in SeenChains was reached while walking up another |
| // op's chain, so it is transitively chained to a remaining op and can be |
| // dropped. |
| for (const SDValue &Op : Ops) { |
| if (SeenChains.count(Op.getNode()) == 0) |
| PrunedOps.push_back(Op); |
| } |
| Result = DAG.getTokenFactor(SDLoc(N), PrunedOps); |
| } else { |
| Result = DAG.getTokenFactor(SDLoc(N), Ops); |
| } |
| } |
| return Result; |
| } |
| return SDValue(); |
| } |
| |
| /// MERGE_VALUES can always be eliminated. |
| SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) { |
| WorklistRemover DeadNodes(*this); |
| // Replacing results may cause a different MERGE_VALUES to suddenly |
| // be CSE'd with N, and carry its uses with it. Iterate until no |
| // uses remain, to ensure that the node can be safely deleted. |
| // First add the users of this node to the work list so that they |
| // can be tried again once they have new operands. |
| AddUsersToWorklist(N); |
| do { |
| // Do as a single replacement to avoid rewalking use lists. |
| SmallVector<SDValue, 8> Ops(N->ops()); |
| DAG.ReplaceAllUsesWith(N, Ops.data()); |
| } while (!N->use_empty()); |
| deleteAndRecombine(N); |
| return SDValue(N, 0); // Return N so it doesn't get rechecked! |
| } |
| |
| /// If \p N is a ConstantSDNode with isOpaque() == false, return it cast to a |
| /// ConstantSDNode pointer, else nullptr. |
| static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) { |
| ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N); |
| return Const != nullptr && !Const->isOpaque() ? Const : nullptr; |
| } |
| |
| // isTruncateOf - If N is a truncate of some other value, return true, record |
| // the value being truncated in Op, and record which of Op's bits are zero/one |
| // in Known. This function computes KnownBits to avoid a duplicated call to |
| // computeKnownBits in the caller. |
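| // The second pattern below is not a literal TRUNCATE: a boolean |
| // (setcc ne X, 0) where every bit of X except bit 0 is known zero behaves |
| // exactly like a truncate of X to i1, so it is reported the same way. |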
| static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, |
| KnownBits &Known) { |
| if (N->getOpcode() == ISD::TRUNCATE) { |
| Op = N->getOperand(0); |
| Known = DAG.computeKnownBits(Op); |
| if (N->getFlags().hasNoUnsignedWrap()) |
| Known.Zero.setBitsFrom(N.getScalarValueSizeInBits()); |
| return true; |
| } |
| |
| if (N.getValueType().getScalarType() != MVT::i1 || |
| !sd_match( |
| N, m_c_SetCC(m_Value(Op), m_Zero(), m_SpecificCondCode(ISD::SETNE)))) |
| return false; |
| |
| Known = DAG.computeKnownBits(Op); |
| return (Known.Zero | 1).isAllOnes(); |
| } |
| |
| /// Return true if 'Use' is a load or a store that uses N as its base pointer |
| /// and that N may be folded in the load / store addressing mode. |
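| /// For example, if N = (add x, 8) and Use is a load with base pointer N, |
| /// folding N yields the addressing mode [x + 8]; isLegalAddressingMode then |
| /// decides whether the target supports that mode for the memory type. |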
| static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG, |
| const TargetLowering &TLI) { |
| EVT VT; |
| unsigned AS; |
| |
| if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) { |
| if (LD->isIndexed() || LD->getBasePtr().getNode() != N) |
| return false; |
| VT = LD->getMemoryVT(); |
| AS = LD->getAddressSpace(); |
| } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) { |
| if (ST->isIndexed() || ST->getBasePtr().getNode() != N) |
| return false; |
| VT = ST->getMemoryVT(); |
| AS = ST->getAddressSpace(); |
| } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(Use)) { |
| if (LD->isIndexed() || LD->getBasePtr().getNode() != N) |
| return false; |
| VT = LD->getMemoryVT(); |
| AS = LD->getAddressSpace(); |
| } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(Use)) { |
| if (ST->isIndexed() || ST->getBasePtr().getNode() != N) |
| return false; |
| VT = ST->getMemoryVT(); |
| AS = ST->getAddressSpace(); |
| } else { |
| return false; |
| } |
| |
| TargetLowering::AddrMode AM; |
| if (N->isAnyAdd()) { |
| AM.HasBaseReg = true; |
| ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| if (Offset) |
| // [reg +/- imm] |
| AM.BaseOffs = Offset->getSExtValue(); |
| else |
| // [reg +/- reg] |
| AM.Scale = 1; |
| } else if (N->getOpcode() == ISD::SUB) { |
| AM.HasBaseReg = true; |
| ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| if (Offset) |
| // [reg +/- imm] |
| AM.BaseOffs = -Offset->getSExtValue(); |
| else |
| // [reg +/- reg] |
| AM.Scale = 1; |
| } else { |
| return false; |
| } |
| |
| return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, |
| VT.getTypeForEVT(*DAG.getContext()), AS); |
| } |
| |
| /// This inverts a canonicalization in IR that replaces a variable select arm |
| /// with an identity constant. Codegen improves if we re-use the variable |
| /// operand rather than load a constant. This can also be converted into a |
| /// masked vector operation if the target supports it. |
| static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG, |
| bool ShouldCommuteOperands) { |
| // Match a select as operand 1. The identity constant that we are looking for |
| // is only valid as operand 1 of a non-commutative binop. |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| if (ShouldCommuteOperands) |
| std::swap(N0, N1); |
| |
| unsigned SelOpcode = N1.getOpcode(); |
| if ((SelOpcode != ISD::VSELECT && SelOpcode != ISD::SELECT) || |
| !N1.hasOneUse()) |
| return SDValue(); |
| |
| // We can't hoist all instructions because of immediate UB (not speculatable). |
| // For example div/rem by zero. |
| if (!DAG.isSafeToSpeculativelyExecuteNode(N)) |
| return SDValue(); |
| |
| unsigned Opcode = N->getOpcode(); |
| EVT VT = N->getValueType(0); |
| SDValue Cond = N1.getOperand(0); |
| SDValue TVal = N1.getOperand(1); |
| SDValue FVal = N1.getOperand(2); |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| |
| // This transform increases uses of N0, so freeze it to be safe. |
| // binop N0, (vselect Cond, IDC, FVal) --> vselect Cond, N0, (binop N0, FVal) |
| unsigned OpNo = ShouldCommuteOperands ? 0 : 1; |
| if (isNeutralConstant(Opcode, N->getFlags(), TVal, OpNo) && |
| TLI.shouldFoldSelectWithIdentityConstant(Opcode, VT, SelOpcode, N0, |
| FVal)) { |
| SDValue F0 = DAG.getFreeze(N0); |
| SDValue NewBO = DAG.getNode(Opcode, SDLoc(N), VT, F0, FVal, N->getFlags()); |
| return DAG.getSelect(SDLoc(N), VT, Cond, F0, NewBO); |
| } |
| // binop N0, (vselect Cond, TVal, IDC) --> vselect Cond, (binop N0, TVal), N0 |
| if (isNeutralConstant(Opcode, N->getFlags(), FVal, OpNo) && |
| TLI.shouldFoldSelectWithIdentityConstant(Opcode, VT, SelOpcode, N0, |
| TVal)) { |
| SDValue F0 = DAG.getFreeze(N0); |
| SDValue NewBO = DAG.getNode(Opcode, SDLoc(N), VT, F0, TVal, N->getFlags()); |
| return DAG.getSelect(SDLoc(N), VT, Cond, NewBO, F0); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) { |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| assert(TLI.isBinOp(BO->getOpcode()) && BO->getNumValues() == 1 && |
| "Unexpected binary operator"); |
| |
| if (SDValue Sel = foldSelectWithIdentityConstant(BO, DAG, false)) |
| return Sel; |
| |
| if (TLI.isCommutativeBinOp(BO->getOpcode())) |
| if (SDValue Sel = foldSelectWithIdentityConstant(BO, DAG, true)) |
| return Sel; |
| |
| // Don't do this unless the old select is going away. We want to eliminate the |
| // binary operator, not replace a binop with a select. |
| // TODO: Handle ISD::SELECT_CC. |
| unsigned SelOpNo = 0; |
| SDValue Sel = BO->getOperand(0); |
| auto BinOpcode = BO->getOpcode(); |
| if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) { |
| SelOpNo = 1; |
| Sel = BO->getOperand(1); |
| |
| // Peek through trunc to shift amount type. |
| if ((BinOpcode == ISD::SHL || BinOpcode == ISD::SRA || |
| BinOpcode == ISD::SRL) && Sel.hasOneUse()) { |
| // This is valid when the truncated bits of x are already zero. |
| SDValue Op; |
| KnownBits Known; |
| if (isTruncateOf(DAG, Sel, Op, Known) && |
| Known.countMaxActiveBits() < Sel.getScalarValueSizeInBits()) |
| Sel = Op; |
| } |
| } |
| |
| if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) |
| return SDValue(); |
| |
| SDValue CT = Sel.getOperand(1); |
| if (!isConstantOrConstantVector(CT, true) && |
| !DAG.isConstantFPBuildVectorOrConstantFP(CT)) |
| return SDValue(); |
| |
| SDValue CF = Sel.getOperand(2); |
| if (!isConstantOrConstantVector(CF, true) && |
| !DAG.isConstantFPBuildVectorOrConstantFP(CF)) |
| return SDValue(); |
| |
| // Bail out if any constants are opaque because we can't constant fold those. |
| // The exception is "and" and "or" with either 0 or -1, in which case we can |
| // propagate non-constant operands into the select. I.e.: |
| // and (select Cond, 0, -1), X --> select Cond, 0, X |
| // or X, (select Cond, -1, 0) --> select Cond, -1, X |
| bool CanFoldNonConst = |
| (BinOpcode == ISD::AND || BinOpcode == ISD::OR) && |
| ((isNullOrNullSplat(CT) && isAllOnesOrAllOnesSplat(CF)) || |
| (isNullOrNullSplat(CF) && isAllOnesOrAllOnesSplat(CT))); |
| |
| SDValue CBO = BO->getOperand(SelOpNo ^ 1); |
| if (!CanFoldNonConst && |
| !isConstantOrConstantVector(CBO, true) && |
| !DAG.isConstantFPBuildVectorOrConstantFP(CBO)) |
| return SDValue(); |
| |
| SDLoc DL(Sel); |
| SDValue NewCT, NewCF; |
| EVT VT = BO->getValueType(0); |
| |
| if (CanFoldNonConst) { |
| // If CBO is an opaque constant, we can't rely on getNode to constant fold. |
| if ((BinOpcode == ISD::AND && isNullOrNullSplat(CT)) || |
| (BinOpcode == ISD::OR && isAllOnesOrAllOnesSplat(CT))) |
| NewCT = CT; |
| else |
| NewCT = CBO; |
| |
| if ((BinOpcode == ISD::AND && isNullOrNullSplat(CF)) || |
| (BinOpcode == ISD::OR && isAllOnesOrAllOnesSplat(CF))) |
| NewCF = CF; |
| else |
| NewCF = CBO; |
| } else { |
| // We have a select-of-constants followed by a binary operator with a |
| // constant. Eliminate the binop by pulling the constant math into the |
| // select. Example: |
| // add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO |
| NewCT = SelOpNo ? DAG.FoldConstantArithmetic(BinOpcode, DL, VT, {CBO, CT}) |
| : DAG.FoldConstantArithmetic(BinOpcode, DL, VT, {CT, CBO}); |
| if (!NewCT) |
| return SDValue(); |
| |
| NewCF = SelOpNo ? DAG.FoldConstantArithmetic(BinOpcode, DL, VT, {CBO, CF}) |
| : DAG.FoldConstantArithmetic(BinOpcode, DL, VT, {CF, CBO}); |
| if (!NewCF) |
| return SDValue(); |
| } |
| |
| SDValue SelectOp = DAG.getSelect(DL, VT, Sel.getOperand(0), NewCT, NewCF); |
| SelectOp->setFlags(BO->getFlags()); |
| return SelectOp; |
| } |
| |
| static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, const SDLoc &DL, |
| SelectionDAG &DAG) { |
| assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) && |
| "Expecting add or sub"); |
| |
| // Match a constant operand and a zext operand for the math instruction: |
| // add Z, C |
| // sub C, Z |
| bool IsAdd = N->getOpcode() == ISD::ADD; |
| SDValue C = IsAdd ? N->getOperand(1) : N->getOperand(0); |
| SDValue Z = IsAdd ? N->getOperand(0) : N->getOperand(1); |
| auto *CN = dyn_cast<ConstantSDNode>(C); |
| if (!CN || Z.getOpcode() != ISD::ZERO_EXTEND) |
| return SDValue(); |
| |
| // Match the zext operand as a setcc of a boolean. |
| if (Z.getOperand(0).getValueType() != MVT::i1) |
| return SDValue(); |
| |
| // Match the compare as: setcc (X & 1), 0, eq. |
| if (!sd_match(Z.getOperand(0), m_SetCC(m_And(m_Value(), m_One()), m_Zero(), |
| m_SpecificCondCode(ISD::SETEQ)))) |
| return SDValue(); |
| |
| // We are adding/subtracting a constant and an inverted low bit. Turn that |
| // into a subtract/add of the low bit with incremented/decremented constant: |
| // add (zext i1 (seteq (X & 1), 0)), C --> sub C+1, (zext (X & 1)) |
| // sub C, (zext i1 (seteq (X & 1), 0)) --> add C-1, (zext (X & 1)) |
| EVT VT = C.getValueType(); |
| SDValue LowBit = DAG.getZExtOrTrunc(Z.getOperand(0).getOperand(0), DL, VT); |
| SDValue C1 = IsAdd ? DAG.getConstant(CN->getAPIntValue() + 1, DL, VT) |
| : DAG.getConstant(CN->getAPIntValue() - 1, DL, VT); |
| return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, C1, LowBit); |
| } |
| |
| // Attempt to form avgceil(A, B) from (A | B) - ((A ^ B) >> 1) |
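| // This avoids widening: since A + B == (A | B) + (A & B), the expression |
| // (A | B) - ((A ^ B) >> 1) equals ceil((A + B) / 2) without overflowing the |
| // element type (logical shift for unsigned, arithmetic shift for signed). |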
| SDValue DAGCombiner::foldSubToAvg(SDNode *N, const SDLoc &DL) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N0.getValueType(); |
| SDValue A, B; |
| |
| if ((!LegalOperations || hasOperation(ISD::AVGCEILU, VT)) && |
| sd_match(N, m_Sub(m_Or(m_Value(A), m_Value(B)), |
| m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)), m_One())))) { |
| return DAG.getNode(ISD::AVGCEILU, DL, VT, A, B); |
| } |
| if ((!LegalOperations || hasOperation(ISD::AVGCEILS, VT)) && |
| sd_match(N, m_Sub(m_Or(m_Value(A), m_Value(B)), |
| m_Sra(m_Xor(m_Deferred(A), m_Deferred(B)), m_One())))) { |
| return DAG.getNode(ISD::AVGCEILS, DL, VT, A, B); |
| } |
| return SDValue(); |
| } |
| |
| /// Try to fold a 'not' of a shifted sign-bit with an add/sub of a constant |
| /// operand into a shift and add with a different constant. |
| static SDValue foldAddSubOfSignBit(SDNode *N, const SDLoc &DL, |
| SelectionDAG &DAG) { |
| assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) && |
| "Expecting add or sub"); |
| |
| // We need a constant operand for the add/sub, and the other operand is a |
| // logical shift right: add (srl), C or sub C, (srl). |
| bool IsAdd = N->getOpcode() == ISD::ADD; |
| SDValue ConstantOp = IsAdd ? N->getOperand(1) : N->getOperand(0); |
| SDValue ShiftOp = IsAdd ? N->getOperand(0) : N->getOperand(1); |
| if (!DAG.isConstantIntBuildVectorOrConstantInt(ConstantOp) || |
| ShiftOp.getOpcode() != ISD::SRL) |
| return SDValue(); |
| |
| // The shift must be of a 'not' value. |
| SDValue Not = ShiftOp.getOperand(0); |
| if (!Not.hasOneUse() || !isBitwiseNot(Not)) |
| return SDValue(); |
| |
| // The shift must be moving the sign bit to the least-significant-bit. |
| EVT VT = ShiftOp.getValueType(); |
| SDValue ShAmt = ShiftOp.getOperand(1); |
| ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt); |
| if (!ShAmtC || ShAmtC->getAPIntValue() != (VT.getScalarSizeInBits() - 1)) |
| return SDValue(); |
| |
| // Eliminate the 'not' by adjusting the shift and add/sub constant: |
| // add (srl (not X), 31), C --> add (sra X, 31), (C + 1) |
| // sub C, (srl (not X), 31) --> add (srl X, 31), (C - 1) |
| if (SDValue NewC = DAG.FoldConstantArithmetic( |
| IsAdd ? ISD::ADD : ISD::SUB, DL, VT, |
| {ConstantOp, DAG.getConstant(1, DL, VT)})) { |
| SDValue NewShift = DAG.getNode(IsAdd ? ISD::SRA : ISD::SRL, DL, VT, |
| Not.getOperand(0), ShAmt); |
| return DAG.getNode(ISD::ADD, DL, VT, NewShift, NewC); |
| } |
| |
| return SDValue(); |
| } |
| |
| static bool areBitwiseNotOfEachother(SDValue Op0, SDValue Op1) { |
| return (isBitwiseNot(Op0) && Op0.getOperand(0) == Op1) || |
| (isBitwiseNot(Op1) && Op1.getOperand(0) == Op0); |
| } |
| |
| /// Try to fold a node that behaves like an ADD (note that N isn't necessarily |
| /// an ISD::ADD here; it could, for example, be an ISD::OR if we know that |
| /// there are no common bits set in the operands). |
| SDValue DAGCombiner::visitADDLike(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| SDLoc DL(N); |
| |
| // fold (add x, undef) -> undef |
| if (N0.isUndef()) |
| return N0; |
| if (N1.isUndef()) |
| return N1; |
| |
| // fold (add c1, c2) -> c1+c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::ADD, DL, VT, N1, N0); |
| |
| if (areBitwiseNotOfEachother(N0, N1)) |
| return DAG.getConstant(APInt::getAllOnes(VT.getScalarSizeInBits()), DL, VT); |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (add x, 0) -> x, vector edition |
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return N0; |
| } |
| |
| // fold (add x, 0) -> x |
| if (isNullConstant(N1)) |
| return N0; |
| |
| if (N0.getOpcode() == ISD::SUB) { |
| SDValue N00 = N0.getOperand(0); |
| SDValue N01 = N0.getOperand(1); |
| |
| // fold ((A-c1)+c2) -> (A+(c2-c1)) |
| if (SDValue Sub = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N1, N01})) |
| return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Sub); |
| |
| // fold ((c1-A)+c2) -> (c1+c2)-A |
| if (SDValue Add = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, {N1, N00})) |
| return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1)); |
| } |
| |
| // add (sext i1 X), 1 -> zext (not i1 X) |
| // We don't transform this pattern: |
| // add (zext i1 X), -1 -> sext (not i1 X) |
| // because most (?) targets generate better code for the zext form. |
| if (N0.getOpcode() == ISD::SIGN_EXTEND && N0.hasOneUse() && |
| isOneOrOneSplat(N1)) { |
| SDValue X = N0.getOperand(0); |
| if ((!LegalOperations || |
| (TLI.isOperationLegal(ISD::XOR, X.getValueType()) && |
| TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) && |
| X.getScalarValueSizeInBits() == 1) { |
| SDValue Not = DAG.getNOT(DL, X, X.getValueType()); |
| return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Not); |
| } |
| } |
| |
| // Fold (add (or x, c0), c1) -> (add x, (c0 + c1)) |
| // iff (or x, c0) is equivalent to (add x, c0). |
| // Fold (add (xor x, c0), c1) -> (add x, (c0 + c1)) |
| // iff (xor x, c0) is equivalent to (add x, c0). |
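| // For example, if the low two bits of x are known to be zero, then |
| // (or x, 3) == (add x, 3), and so (add (or x, 3), 8) --> (add x, 11). |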
| if (DAG.isADDLike(N0)) { |
| SDValue N01 = N0.getOperand(1); |
| if (SDValue Add = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, {N1, N01})) |
| return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), Add); |
| } |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // reassociate add |
| if (!reassociationCanBreakAddressingModePattern(ISD::ADD, DL, N, N0, N1)) { |
| if (SDValue RADD = reassociateOps(ISD::ADD, DL, N0, N1, N->getFlags())) |
| return RADD; |
| |
| // Reassociate (add (or x, c), y) -> (add (add x, y), c) if (or x, c) is |
| // equivalent to (add x, c). |
| // Reassociate (add (xor x, c), y) -> (add (add x, y), c) if (xor x, c) is |
| // equivalent to (add x, c). |
| // Do this optimization only when adding c does not introduce instructions |
| // for adding carries. |
| auto ReassociateAddOr = [&](SDValue N0, SDValue N1) { |
| if (DAG.isADDLike(N0) && N0.hasOneUse() && |
| isConstantOrConstantVector(N0.getOperand(1), /* NoOpaque */ true)) { |
| // If N0's type does not split, or the constant is a sign mask, the add |
| // does not introduce a carry. |
| auto TyActn = TLI.getTypeAction(*DAG.getContext(), N0.getValueType()); |
| bool NoAddCarry = TyActn == TargetLoweringBase::TypeLegal || |
| TyActn == TargetLoweringBase::TypePromoteInteger || |
| isMinSignedConstant(N0.getOperand(1)); |
| if (NoAddCarry) |
| return DAG.getNode( |
| ISD::ADD, DL, VT, |
| DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)), |
| N0.getOperand(1)); |
| } |
| return SDValue(); |
| }; |
| if (SDValue Add = ReassociateAddOr(N0, N1)) |
| return Add; |
| if (SDValue Add = ReassociateAddOr(N1, N0)) |
| return Add; |
| |
| // Fold add(vecreduce(x), vecreduce(y)) -> vecreduce(add(x, y)) |
| if (SDValue SD = |
| reassociateReduction(ISD::VECREDUCE_ADD, ISD::ADD, DL, VT, N0, N1)) |
| return SD; |
| } |
| |
| SDValue A, B, C, D; |
| |
| // fold ((0-A) + B) -> B-A |
| if (sd_match(N0, m_Neg(m_Value(A)))) |
| return DAG.getNode(ISD::SUB, DL, VT, N1, A); |
| |
| // fold (A + (0-B)) -> A-B |
| if (sd_match(N1, m_Neg(m_Value(B)))) |
| return DAG.getNode(ISD::SUB, DL, VT, N0, B); |
| |
| // fold (A+(B-A)) -> B |
| if (sd_match(N1, m_Sub(m_Value(B), m_Specific(N0)))) |
| return B; |
| |
| // fold ((B-A)+A) -> B |
| if (sd_match(N0, m_Sub(m_Value(B), m_Specific(N1)))) |
| return B; |
| |
| // fold ((A-B)+(C-A)) -> (C-B) |
| if (sd_match(N0, m_Sub(m_Value(A), m_Value(B))) && |
| sd_match(N1, m_Sub(m_Value(C), m_Specific(A)))) |
| return DAG.getNode(ISD::SUB, DL, VT, C, B); |
| |
| // fold ((A-B)+(B-C)) -> (A-C) |
| if (sd_match(N0, m_Sub(m_Value(A), m_Value(B))) && |
| sd_match(N1, m_Sub(m_Specific(B), m_Value(C)))) |
| return DAG.getNode(ISD::SUB, DL, VT, A, C); |
| |
| // fold (A+(B-(A+C))) to (B-C) |
| // fold (A+(B-(C+A))) to (B-C) |
| if (sd_match(N1, m_Sub(m_Value(B), m_Add(m_Specific(N0), m_Value(C))))) |
| return DAG.getNode(ISD::SUB, DL, VT, B, C); |
| |
| // fold (A+((B-A)+or-C)) to (B+or-C) |
| if (sd_match(N1, |
| m_AnyOf(m_Add(m_Sub(m_Value(B), m_Specific(N0)), m_Value(C)), |
| m_Sub(m_Sub(m_Value(B), m_Specific(N0)), m_Value(C))))) |
| return DAG.getNode(N1.getOpcode(), DL, VT, B, C); |
| |
| // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant |
| if (sd_match(N0, m_OneUse(m_Sub(m_Value(A), m_Value(B)))) && |
| sd_match(N1, m_OneUse(m_Sub(m_Value(C), m_Value(D)))) && |
| (isConstantOrConstantVector(A) || isConstantOrConstantVector(C))) |
| return DAG.getNode(ISD::SUB, DL, VT, |
| DAG.getNode(ISD::ADD, SDLoc(N0), VT, A, C), |
| DAG.getNode(ISD::ADD, SDLoc(N1), VT, B, D)); |
| |
| // fold (add (umax X, C), -C) --> (usubsat X, C) |
| if (N0.getOpcode() == ISD::UMAX && hasOperation(ISD::USUBSAT, VT)) { |
| auto MatchUSUBSAT = [](ConstantSDNode *Max, ConstantSDNode *Op) { |
| return (!Max && !Op) || |
| (Max && Op && Max->getAPIntValue() == (-Op->getAPIntValue())); |
| }; |
| if (ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchUSUBSAT, |
| /*AllowUndefs*/ true)) |
| return DAG.getNode(ISD::USUBSAT, DL, VT, N0.getOperand(0), |
| N0.getOperand(1)); |
| } |
| |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| if (isOneOrOneSplat(N1)) { |
| // fold (add (xor a, -1), 1) -> (sub 0, a) |
| if (isBitwiseNot(N0)) |
| return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), |
| N0.getOperand(0)); |
| |
| // fold (add (add (xor a, -1), b), 1) -> (sub b, a) |
| if (N0.getOpcode() == ISD::ADD) { |
| SDValue A, Xor; |
| |
| if (isBitwiseNot(N0.getOperand(0))) { |
| A = N0.getOperand(1); |
| Xor = N0.getOperand(0); |
| } else if (isBitwiseNot(N0.getOperand(1))) { |
| A = N0.getOperand(0); |
| Xor = N0.getOperand(1); |
| } |
| |
| if (Xor) |
| return DAG.getNode(ISD::SUB, DL, VT, A, Xor.getOperand(0)); |
| } |
| |
| // Look for: |
| // add (add x, y), 1 |
| // and if the target does not like this form, turn it into: |
| // sub y, (xor x, -1) |
| if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.getOpcode() == ISD::ADD && |
| N0.hasOneUse() && |
| // Limit this to after legalization if the add has wrap flags |
| (Level >= AfterLegalizeDAG || (!N->getFlags().hasNoUnsignedWrap() && |
| !N->getFlags().hasNoSignedWrap()))) { |
| SDValue Not = DAG.getNOT(DL, N0.getOperand(0), VT); |
| return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(1), Not); |
| } |
| } |
| |
| // (x - y) + -1 -> add (xor y, -1), x |
| if (N0.getOpcode() == ISD::SUB && N0.hasOneUse() && |
| isAllOnesOrAllOnesSplat(N1, /*AllowUndefs=*/true)) { |
| SDValue Not = DAG.getNOT(DL, N0.getOperand(1), VT); |
| return DAG.getNode(ISD::ADD, DL, VT, Not, N0.getOperand(0)); |
| } |
| |
| // Fold add(mul(add(A, CA), CM), CB) -> add(mul(A, CM), CM*CA+CB). |
| // This can help if the inner add has multiple uses. |
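| // For example, with CA = 1, CM = 4 and CB = 2: |
| // add (mul (add A, 1), 4), 2 --> add (mul A, 4), 6 |
| // since 4 * (A + 1) + 2 == 4 * A + 6. |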
| APInt CM, CA; |
| if (ConstantSDNode *CB = dyn_cast<ConstantSDNode>(N1)) { |
| if (VT.getScalarSizeInBits() <= 64) { |
| if (sd_match(N0, m_OneUse(m_Mul(m_Add(m_Value(A), m_ConstInt(CA)), |
| m_ConstInt(CM)))) && |
| TLI.isLegalAddImmediate( |
| (CA * CM + CB->getAPIntValue()).getSExtValue())) { |
| SDNodeFlags Flags; |
| // If all the inputs are nuw, the outputs can be nuw. If all the inputs |
| // are _also_ nsw, the outputs can be too. |
| if (N->getFlags().hasNoUnsignedWrap() && |
| N0->getFlags().hasNoUnsignedWrap() && |
| N0.getOperand(0)->getFlags().hasNoUnsignedWrap()) { |
| Flags |= SDNodeFlags::NoUnsignedWrap; |
| if (N->getFlags().hasNoSignedWrap() && |
| N0->getFlags().hasNoSignedWrap() && |
| N0.getOperand(0)->getFlags().hasNoSignedWrap()) |
| Flags |= SDNodeFlags::NoSignedWrap; |
| } |
| SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N1), VT, A, |
| DAG.getConstant(CM, DL, VT), Flags); |
| return DAG.getNode( |
| ISD::ADD, DL, VT, Mul, |
| DAG.getConstant(CA * CM + CB->getAPIntValue(), DL, VT), Flags); |
| } |
| // Also look in case there is an intermediate add. |
| if (sd_match(N0, m_OneUse(m_Add( |
| m_OneUse(m_Mul(m_Add(m_Value(A), m_ConstInt(CA)), |
| m_ConstInt(CM))), |
| m_Value(B)))) && |
| TLI.isLegalAddImmediate( |
| (CA * CM + CB->getAPIntValue()).getSExtValue())) { |
| SDNodeFlags Flags; |
| // If all the inputs are nuw, the outputs can be nuw. If all the inputs |
| // are _also_ nsw, the outputs can be too. |
| SDValue OMul = |
| N0.getOperand(0) == B ? N0.getOperand(1) : N0.getOperand(0); |
| if (N->getFlags().hasNoUnsignedWrap() && |
| N0->getFlags().hasNoUnsignedWrap() && |
| OMul->getFlags().hasNoUnsignedWrap() && |
| OMul.getOperand(0)->getFlags().hasNoUnsignedWrap()) { |
| Flags |= SDNodeFlags::NoUnsignedWrap; |
| if (N->getFlags().hasNoSignedWrap() && |
| N0->getFlags().hasNoSignedWrap() && |
| OMul->getFlags().hasNoSignedWrap() && |
| OMul.getOperand(0)->getFlags().hasNoSignedWrap()) |
| Flags |= SDNodeFlags::NoSignedWrap; |
| } |
| SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N1), VT, A, |
| DAG.getConstant(CM, DL, VT), Flags); |
| SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N1), VT, Mul, B, Flags); |
| return DAG.getNode( |
| ISD::ADD, DL, VT, Add, |
| DAG.getConstant(CA * CM + CB->getAPIntValue(), DL, VT), Flags); |
| } |
| } |
| } |
| |
| if (SDValue Combined = visitADDLikeCommutative(N0, N1, N)) |
| return Combined; |
| |
| if (SDValue Combined = visitADDLikeCommutative(N1, N0, N)) |
| return Combined; |
| |
| return SDValue(); |
| } |
| |
| // Attempt to form avgfloor(A, B) from (A & B) + ((A ^ B) >> 1) |
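// This relies on the identity A + B == 2 * (A & B) + (A ^ B) (carry bits plus
// sum bits): (A & B) + ((A ^ B) >> 1) therefore computes floor((A + B) / 2)
// without the intermediate overflow of a plain add. The arithmetic-shift
// variant gives the corresponding signed floor average.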
| SDValue DAGCombiner::foldAddToAvg(SDNode *N, const SDLoc &DL) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N0.getValueType(); |
| SDValue A, B; |
| |
| if ((!LegalOperations || hasOperation(ISD::AVGFLOORU, VT)) && |
| sd_match(N, m_Add(m_And(m_Value(A), m_Value(B)), |
| m_Srl(m_Xor(m_Deferred(A), m_Deferred(B)), m_One())))) { |
| return DAG.getNode(ISD::AVGFLOORU, DL, VT, A, B); |
| } |
| if ((!LegalOperations || hasOperation(ISD::AVGFLOORS, VT)) && |
| sd_match(N, m_Add(m_And(m_Value(A), m_Value(B)), |
| m_Sra(m_Xor(m_Deferred(A), m_Deferred(B)), m_One())))) { |
| return DAG.getNode(ISD::AVGFLOORS, DL, VT, A, B); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitADD(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| SDLoc DL(N); |
| |
| if (SDValue Combined = visitADDLike(N)) |
| return Combined; |
| |
| if (SDValue V = foldAddSubBoolOfMaskedVal(N, DL, DAG)) |
| return V; |
| |
| if (SDValue V = foldAddSubOfSignBit(N, DL, DAG)) |
| return V; |
| |
| if (SDValue V = MatchRotate(N0, N1, SDLoc(N), /*FromAdd=*/true)) |
| return V; |
| |
| // Try to match AVGFLOOR fixedwidth pattern |
| if (SDValue V = foldAddToAvg(N, DL)) |
| return V; |
| |
| // fold (a+b) -> (a|b) iff a and b share no bits. |
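  // With no common bits set, no bit position can produce a carry, so the add
  // and the or compute the same value; the Disjoint flag records this for
  // later combines.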
| if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) && |
| DAG.haveNoCommonBitsSet(N0, N1)) |
| return DAG.getNode(ISD::OR, DL, VT, N0, N1, SDNodeFlags::Disjoint); |
| |
| // Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)). |
| if (N0.getOpcode() == ISD::VSCALE && N1.getOpcode() == ISD::VSCALE) { |
| const APInt &C0 = N0->getConstantOperandAPInt(0); |
| const APInt &C1 = N1->getConstantOperandAPInt(0); |
| return DAG.getVScale(DL, VT, C0 + C1); |
| } |
| |
| // fold a+vscale(c1)+vscale(c2) -> a+vscale(c1+c2) |
| if (N0.getOpcode() == ISD::ADD && |
| N0.getOperand(1).getOpcode() == ISD::VSCALE && |
| N1.getOpcode() == ISD::VSCALE) { |
| const APInt &VS0 = N0.getOperand(1)->getConstantOperandAPInt(0); |
| const APInt &VS1 = N1->getConstantOperandAPInt(0); |
| SDValue VS = DAG.getVScale(DL, VT, VS0 + VS1); |
| return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), VS); |
| } |
| |
  // Fold (add step_vector(c1), step_vector(c2)) to step_vector(c1+c2)
| if (N0.getOpcode() == ISD::STEP_VECTOR && |
| N1.getOpcode() == ISD::STEP_VECTOR) { |
| const APInt &C0 = N0->getConstantOperandAPInt(0); |
| const APInt &C1 = N1->getConstantOperandAPInt(0); |
| APInt NewStep = C0 + C1; |
| return DAG.getStepVector(DL, VT, NewStep); |
| } |
| |
| // Fold a + step_vector(c1) + step_vector(c2) to a + step_vector(c1+c2) |
| if (N0.getOpcode() == ISD::ADD && |
| N0.getOperand(1).getOpcode() == ISD::STEP_VECTOR && |
| N1.getOpcode() == ISD::STEP_VECTOR) { |
| const APInt &SV0 = N0.getOperand(1)->getConstantOperandAPInt(0); |
| const APInt &SV1 = N1->getConstantOperandAPInt(0); |
| APInt NewStep = SV0 + SV1; |
| SDValue SV = DAG.getStepVector(DL, VT, NewStep); |
| return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), SV); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitADDSAT(SDNode *N) { |
| unsigned Opcode = N->getOpcode(); |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| bool IsSigned = Opcode == ISD::SADDSAT; |
| SDLoc DL(N); |
| |
| // fold (add_sat x, undef) -> -1 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getAllOnesConstant(DL, VT); |
| |
| // fold (add_sat c1, c2) -> c3 |
| if (SDValue C = DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(Opcode, DL, VT, N1, N0); |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (add_sat x, 0) -> x, vector edition |
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return N0; |
| } |
| |
| // fold (add_sat x, 0) -> x |
| if (isNullConstant(N1)) |
| return N0; |
| |
| // If it cannot overflow, transform into an add. |
| if (DAG.willNotOverflowAdd(IsSigned, N0, N1)) |
| return DAG.getNode(ISD::ADD, DL, VT, N0, N1); |
| |
| return SDValue(); |
| } |
| |
| static SDValue getAsCarry(const TargetLowering &TLI, SDValue V, |
| bool ForceCarryReconstruction = false) { |
| bool Masked = false; |
| |
| // First, peel away TRUNCATE/ZERO_EXTEND/AND nodes due to legalization. |
| while (true) { |
| if (V.getOpcode() == ISD::TRUNCATE || V.getOpcode() == ISD::ZERO_EXTEND) { |
| V = V.getOperand(0); |
| continue; |
| } |
| |
| if (V.getOpcode() == ISD::AND && isOneConstant(V.getOperand(1))) { |
| if (ForceCarryReconstruction) |
| return V; |
| |
| Masked = true; |
| V = V.getOperand(0); |
| continue; |
| } |
| |
| if (ForceCarryReconstruction && V.getValueType() == MVT::i1) |
| return V; |
| |
| break; |
| } |
| |
| // If this is not a carry, return. |
| if (V.getResNo() != 1) |
| return SDValue(); |
| |
| if (V.getOpcode() != ISD::UADDO_CARRY && V.getOpcode() != ISD::USUBO_CARRY && |
| V.getOpcode() != ISD::UADDO && V.getOpcode() != ISD::USUBO) |
| return SDValue(); |
| |
| EVT VT = V->getValueType(0); |
| if (!TLI.isOperationLegalOrCustom(V.getOpcode(), VT)) |
| return SDValue(); |
| |
  // If the result is masked, then no matter what kind of bool it is we can
  // return it. If it isn't, then we need to make sure the bool type is either
  // 0 or 1 and not other values.
| if (Masked || |
| TLI.getBooleanContents(V.getValueType()) == |
| TargetLoweringBase::ZeroOrOneBooleanContent) |
| return V; |
| |
| return SDValue(); |
| } |
| |
| /// Given the operands of an add/sub operation, see if the 2nd operand is a |
| /// masked 0/1 whose source operand is actually known to be 0/-1. If so, invert |
| /// the opcode and bypass the mask operation. |
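/// For example, if the mask source X is known to be 0/-1, then (and X, 1)
/// equals -X, so N0 + (and X, 1) == N0 - X and N0 - (and X, 1) == N0 + X.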
| static SDValue foldAddSubMasked1(bool IsAdd, SDValue N0, SDValue N1, |
| SelectionDAG &DAG, const SDLoc &DL) { |
| if (N1.getOpcode() == ISD::ZERO_EXTEND) |
| N1 = N1.getOperand(0); |
| |
| if (N1.getOpcode() != ISD::AND || !isOneOrOneSplat(N1->getOperand(1))) |
| return SDValue(); |
| |
| EVT VT = N0.getValueType(); |
| SDValue N10 = N1.getOperand(0); |
| if (N10.getValueType() != VT && N10.getOpcode() == ISD::TRUNCATE) |
| N10 = N10.getOperand(0); |
| |
| if (N10.getValueType() != VT) |
| return SDValue(); |
| |
| if (DAG.ComputeNumSignBits(N10) != VT.getScalarSizeInBits()) |
| return SDValue(); |
| |
| // add N0, (and (AssertSext X, i1), 1) --> sub N0, X |
| // sub N0, (and (AssertSext X, i1), 1) --> add N0, X |
| return DAG.getNode(IsAdd ? ISD::SUB : ISD::ADD, DL, VT, N0, N10); |
| } |
| |
| /// Helper for doing combines based on N0 and N1 being added to each other. |
| SDValue DAGCombiner::visitADDLikeCommutative(SDValue N0, SDValue N1, |
| SDNode *LocReference) { |
| EVT VT = N0.getValueType(); |
| SDLoc DL(LocReference); |
| |
| // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n)) |
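  // This holds because negation distributes over shl modulo 2^BitWidth:
  // (0 - y) << n == 0 - (y << n).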
| SDValue Y, N; |
| if (sd_match(N1, m_Shl(m_Neg(m_Value(Y)), m_Value(N)))) |
| return DAG.getNode(ISD::SUB, DL, VT, N0, |
| DAG.getNode(ISD::SHL, DL, VT, Y, N)); |
| |
| if (SDValue V = foldAddSubMasked1(true, N0, N1, DAG, DL)) |
| return V; |
| |
| // Look for: |
| // add (add x, 1), y |
| // And if the target does not like this form then turn into: |
| // sub y, (xor x, -1) |
| if (!TLI.preferIncOfAddToSubOfNot(VT) && N0.getOpcode() == ISD::ADD && |
| N0.hasOneUse() && isOneOrOneSplat(N0.getOperand(1)) && |
| // Limit this to after legalization if the add has wrap flags |
| (Level >= AfterLegalizeDAG || (!N0->getFlags().hasNoUnsignedWrap() && |
| !N0->getFlags().hasNoSignedWrap()))) { |
| SDValue Not = DAG.getNOT(DL, N0.getOperand(0), VT); |
| return DAG.getNode(ISD::SUB, DL, VT, N1, Not); |
| } |
| |
| if (N0.getOpcode() == ISD::SUB && N0.hasOneUse()) { |
| // Hoist one-use subtraction by non-opaque constant: |
| // (x - C) + y -> (x + y) - C |
| // This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors. |
| if (isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) { |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), N1); |
| return DAG.getNode(ISD::SUB, DL, VT, Add, N0.getOperand(1)); |
| } |
| // Hoist one-use subtraction from non-opaque constant: |
| // (C - x) + y -> (y - x) + C |
| if (isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) { |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1)); |
| return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(0)); |
| } |
| } |
| |
| // add (mul x, C), x -> mul x, C+1 |
| if (N0.getOpcode() == ISD::MUL && N0.getOperand(0) == N1 && |
| isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true) && |
| N0.hasOneUse()) { |
| SDValue NewC = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1), |
| DAG.getConstant(1, DL, VT)); |
| return DAG.getNode(ISD::MUL, DL, VT, N0.getOperand(0), NewC); |
| } |
| |
| // If the target's bool is represented as 0/1, prefer to make this 'sub 0/1' |
| // rather than 'add 0/-1' (the zext should get folded). |
| // add (sext i1 Y), X --> sub X, (zext i1 Y) |
| if (N0.getOpcode() == ISD::SIGN_EXTEND && |
| N0.getOperand(0).getScalarValueSizeInBits() == 1 && |
| TLI.getBooleanContents(VT) == TargetLowering::ZeroOrOneBooleanContent) { |
| SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); |
| return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt); |
| } |
| |
| // add X, (sextinreg Y i1) -> sub X, (and Y 1) |
| if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { |
| VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); |
| if (TN->getVT() == MVT::i1) { |
| SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), |
| DAG.getConstant(1, DL, VT)); |
| return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt); |
| } |
| } |
| |
| // (add X, (uaddo_carry Y, 0, Carry)) -> (uaddo_carry X, Y, Carry) |
| if (N1.getOpcode() == ISD::UADDO_CARRY && isNullConstant(N1.getOperand(1)) && |
| N1.getResNo() == 0) |
| return DAG.getNode(ISD::UADDO_CARRY, DL, N1->getVTList(), |
| N0, N1.getOperand(0), N1.getOperand(2)); |
| |
| // (add X, Carry) -> (uaddo_carry X, 0, Carry) |
| if (TLI.isOperationLegalOrCustom(ISD::UADDO_CARRY, VT)) |
| if (SDValue Carry = getAsCarry(TLI, N1)) |
| return DAG.getNode(ISD::UADDO_CARRY, DL, |
| DAG.getVTList(VT, Carry.getValueType()), N0, |
| DAG.getConstant(0, DL, VT), Carry); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitADDC(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| SDLoc DL(N); |
| |
| // If the flag result is dead, turn this into an ADD. |
| if (!N->hasAnyUseOfValue(1)) |
| return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), |
| DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); |
| |
| // canonicalize constant to RHS. |
| ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); |
| ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
| if (N0C && !N1C) |
| return DAG.getNode(ISD::ADDC, DL, N->getVTList(), N1, N0); |
| |
| // fold (addc x, 0) -> x + no carry out |
| if (isNullConstant(N1)) |
| return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, |
| DL, MVT::Glue)); |
| |
| // If it cannot overflow, transform into an add. |
| if (DAG.computeOverflowForUnsignedAdd(N0, N1) == SelectionDAG::OFK_Never) |
| return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), |
| DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); |
| |
| return SDValue(); |
| } |
| |
| /** |
 * Flips a boolean if it is cheaper to compute. If the Force parameter is set,
| * then the flip also occurs if computing the inverse is the same cost. |
| * This function returns an empty SDValue in case it cannot flip the boolean |
| * without increasing the cost of the computation. If you want to flip a boolean |
| * no matter what, use DAG.getLogicalNOT. |
| */ |
| static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG, |
| const TargetLowering &TLI, |
| bool Force) { |
| if (Force && isa<ConstantSDNode>(V)) |
| return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType()); |
| |
| if (V.getOpcode() != ISD::XOR) |
| return SDValue(); |
| |
| if (DAG.isBoolConstant(V.getOperand(1)) == true) |
| return V.getOperand(0); |
| if (Force && isConstOrConstSplat(V.getOperand(1), false)) |
| return DAG.getLogicalNOT(SDLoc(V), V, V.getValueType()); |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitADDO(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| bool IsSigned = (ISD::SADDO == N->getOpcode()); |
| |
| EVT CarryVT = N->getValueType(1); |
| SDLoc DL(N); |
| |
| // If the flag result is dead, turn this into an ADD. |
| if (!N->hasAnyUseOfValue(1)) |
| return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), |
| DAG.getUNDEF(CarryVT)); |
| |
| // canonicalize constant to RHS. |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N1, N0); |
| |
| // fold (addo x, 0) -> x + no carry out |
| if (isNullOrNullSplat(N1)) |
| return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT)); |
| |
| // If it cannot overflow, transform into an add. |
| if (DAG.willNotOverflowAdd(IsSigned, N0, N1)) |
| return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), |
| DAG.getConstant(0, DL, CarryVT)); |
| |
| if (IsSigned) { |
| // fold (saddo (xor a, -1), 1) -> (ssub 0, a). |
| if (isBitwiseNot(N0) && isOneOrOneSplat(N1)) |
| return DAG.getNode(ISD::SSUBO, DL, N->getVTList(), |
| DAG.getConstant(0, DL, VT), N0.getOperand(0)); |
| } else { |
| // fold (uaddo (xor a, -1), 1) -> (usub 0, a) and flip carry. |
| if (isBitwiseNot(N0) && isOneOrOneSplat(N1)) { |
| SDValue Sub = DAG.getNode(ISD::USUBO, DL, N->getVTList(), |
| DAG.getConstant(0, DL, VT), N0.getOperand(0)); |
| return CombineTo( |
| N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1))); |
| } |
| |
| if (SDValue Combined = visitUADDOLike(N0, N1, N)) |
| return Combined; |
| |
| if (SDValue Combined = visitUADDOLike(N1, N0, N)) |
| return Combined; |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitUADDOLike(SDValue N0, SDValue N1, SDNode *N) { |
| EVT VT = N0.getValueType(); |
| if (VT.isVector()) |
| return SDValue(); |
| |
| // (uaddo X, (uaddo_carry Y, 0, Carry)) -> (uaddo_carry X, Y, Carry) |
| // If Y + 1 cannot overflow. |
| if (N1.getOpcode() == ISD::UADDO_CARRY && isNullConstant(N1.getOperand(1))) { |
| SDValue Y = N1.getOperand(0); |
| SDValue One = DAG.getConstant(1, SDLoc(N), Y.getValueType()); |
| if (DAG.computeOverflowForUnsignedAdd(Y, One) == SelectionDAG::OFK_Never) |
| return DAG.getNode(ISD::UADDO_CARRY, SDLoc(N), N->getVTList(), N0, Y, |
| N1.getOperand(2)); |
| } |
| |
| // (uaddo X, Carry) -> (uaddo_carry X, 0, Carry) |
| if (TLI.isOperationLegalOrCustom(ISD::UADDO_CARRY, VT)) |
| if (SDValue Carry = getAsCarry(TLI, N1)) |
| return DAG.getNode(ISD::UADDO_CARRY, SDLoc(N), N->getVTList(), N0, |
| DAG.getConstant(0, SDLoc(N), VT), Carry); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitADDE(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue CarryIn = N->getOperand(2); |
| |
| // canonicalize constant to RHS |
| ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); |
| ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
| if (N0C && !N1C) |
| return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(), |
| N1, N0, CarryIn); |
| |
| // fold (adde x, y, false) -> (addc x, y) |
| if (CarryIn.getOpcode() == ISD::CARRY_FALSE) |
| return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitUADDO_CARRY(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue CarryIn = N->getOperand(2); |
| SDLoc DL(N); |
| |
| // canonicalize constant to RHS |
| ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); |
| ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
| if (N0C && !N1C) |
| return DAG.getNode(ISD::UADDO_CARRY, DL, N->getVTList(), N1, N0, CarryIn); |
| |
| // fold (uaddo_carry x, y, false) -> (uaddo x, y) |
| if (isNullConstant(CarryIn)) { |
| if (!LegalOperations || |
| TLI.isOperationLegalOrCustom(ISD::UADDO, N->getValueType(0))) |
| return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N0, N1); |
| } |
| |
| // fold (uaddo_carry 0, 0, X) -> (and (ext/trunc X), 1) and no carry. |
| if (isNullConstant(N0) && isNullConstant(N1)) { |
| EVT VT = N0.getValueType(); |
| EVT CarryVT = CarryIn.getValueType(); |
| SDValue CarryExt = DAG.getBoolExtOrTrunc(CarryIn, DL, VT, CarryVT); |
| AddToWorklist(CarryExt.getNode()); |
| return CombineTo(N, DAG.getNode(ISD::AND, DL, VT, CarryExt, |
| DAG.getConstant(1, DL, VT)), |
| DAG.getConstant(0, DL, CarryVT)); |
| } |
| |
| if (SDValue Combined = visitUADDO_CARRYLike(N0, N1, CarryIn, N)) |
| return Combined; |
| |
| if (SDValue Combined = visitUADDO_CARRYLike(N1, N0, CarryIn, N)) |
| return Combined; |
| |
| // We want to avoid useless duplication. |
  // TODO: This is done automatically for binary operations. As UADDO_CARRY is
  // not a binary operation, it is not possible to leverage this existing
  // mechanism for it. However, if more operations require the same
  // deduplication logic, then it may be worth generalizing.
| SDValue Ops[] = {N1, N0, CarryIn}; |
| SDNode *CSENode = |
| DAG.getNodeIfExists(ISD::UADDO_CARRY, N->getVTList(), Ops, N->getFlags()); |
| if (CSENode) |
| return SDValue(CSENode, 0); |
| |
| return SDValue(); |
| } |
| |
| /** |
 * If we are facing some sort of diamond carry propagation pattern, try to
| * break it up to generate something like: |
| * (uaddo_carry X, 0, (uaddo_carry A, B, Z):Carry) |
| * |
 * The end result is usually an increase in the number of operations required,
 * but because the carry is now linearized, other transforms can kick in and
 * optimize the DAG.
| * |
| * Patterns typically look something like |
| * (uaddo A, B) |
| * / \ |
| * Carry Sum |
| * | \ |
| * | (uaddo_carry *, 0, Z) |
| * | / |
| * \ Carry |
| * | / |
| * (uaddo_carry X, *, *) |
| * |
 * But numerous variations exist. Our goal is to identify A, B, X and Z and
| * produce a combine with a single path for carry propagation. |
| */ |
| static SDValue combineUADDO_CARRYDiamond(DAGCombiner &Combiner, |
| SelectionDAG &DAG, SDValue X, |
| SDValue Carry0, SDValue Carry1, |
| SDNode *N) { |
| if (Carry1.getResNo() != 1 || Carry0.getResNo() != 1) |
| return SDValue(); |
| if (Carry1.getOpcode() != ISD::UADDO) |
| return SDValue(); |
| |
| SDValue Z; |
| |
| /** |
| * First look for a suitable Z. It will present itself in the form of |
| * (uaddo_carry Y, 0, Z) or its equivalent (uaddo Y, 1) for Z=true |
| */ |
| if (Carry0.getOpcode() == ISD::UADDO_CARRY && |
| isNullConstant(Carry0.getOperand(1))) { |
| Z = Carry0.getOperand(2); |
| } else if (Carry0.getOpcode() == ISD::UADDO && |
| isOneConstant(Carry0.getOperand(1))) { |
| EVT VT = Carry0->getValueType(1); |
| Z = DAG.getConstant(1, SDLoc(Carry0.getOperand(1)), VT); |
| } else { |
| // We couldn't find a suitable Z. |
| return SDValue(); |
| } |
| |
  auto cancelDiamond = [&](SDValue A, SDValue B) {
| SDLoc DL(N); |
| SDValue NewY = |
| DAG.getNode(ISD::UADDO_CARRY, DL, Carry0->getVTList(), A, B, Z); |
| Combiner.AddToWorklist(NewY.getNode()); |
| return DAG.getNode(ISD::UADDO_CARRY, DL, N->getVTList(), X, |
| DAG.getConstant(0, DL, X.getValueType()), |
| NewY.getValue(1)); |
| }; |
| |
| /** |
| * (uaddo A, B) |
| * | |
| * Sum |
| * | |
| * (uaddo_carry *, 0, Z) |
| */ |
| if (Carry0.getOperand(0) == Carry1.getValue(0)) { |
| return cancelDiamond(Carry1.getOperand(0), Carry1.getOperand(1)); |
| } |
| |
| /** |
| * (uaddo_carry A, 0, Z) |
| * | |
| * Sum |
| * | |
| * (uaddo *, B) |
| */ |
| if (Carry1.getOperand(0) == Carry0.getValue(0)) { |
| return cancelDiamond(Carry0.getOperand(0), Carry1.getOperand(1)); |
| } |
| |
| if (Carry1.getOperand(1) == Carry0.getValue(0)) { |
| return cancelDiamond(Carry1.getOperand(0), Carry0.getOperand(0)); |
| } |
| |
| return SDValue(); |
| } |
| |
// If we are facing some sort of diamond carry/borrow in/out pattern, try to
| // match patterns like: |
| // |
| // (uaddo A, B) CarryIn |
| // | \ | |
| // | \ | |
| // PartialSum PartialCarryOutX / |
| // | | / |
| // | ____|____________/ |
| // | / | |
| // (uaddo *, *) \________ |
| // | \ \ |
| // | \ | |
| // | PartialCarryOutY | |
| // | \ | |
| // | \ / |
| // AddCarrySum | ______/ |
| // | / |
| // CarryOut = (or *, *) |
| // |
| // And generate UADDO_CARRY (or USUBO_CARRY) with two result values: |
| // |
| // {AddCarrySum, CarryOut} = (uaddo_carry A, B, CarryIn) |
| // |
| // Our goal is to identify A, B, and CarryIn and produce UADDO_CARRY/USUBO_CARRY |
| // with a single path for carry/borrow out propagation. |
| static SDValue combineCarryDiamond(SelectionDAG &DAG, const TargetLowering &TLI, |
| SDValue N0, SDValue N1, SDNode *N) { |
| SDValue Carry0 = getAsCarry(TLI, N0); |
| if (!Carry0) |
| return SDValue(); |
| SDValue Carry1 = getAsCarry(TLI, N1); |
| if (!Carry1) |
| return SDValue(); |
| |
| unsigned Opcode = Carry0.getOpcode(); |
| if (Opcode != Carry1.getOpcode()) |
| return SDValue(); |
| if (Opcode != ISD::UADDO && Opcode != ISD::USUBO) |
| return SDValue(); |
| // Guarantee identical type of CarryOut |
| EVT CarryOutType = N->getValueType(0); |
| if (CarryOutType != Carry0.getValue(1).getValueType() || |
| CarryOutType != Carry1.getValue(1).getValueType()) |
| return SDValue(); |
| |
| // Canonicalize the add/sub of A and B (the top node in the above ASCII art) |
| // as Carry0 and the add/sub of the carry in as Carry1 (the middle node). |
| if (Carry1.getNode()->isOperandOf(Carry0.getNode())) |
| std::swap(Carry0, Carry1); |
| |
  // Check if the nodes are connected in the expected way.
| if (Carry1.getOperand(0) != Carry0.getValue(0) && |
| Carry1.getOperand(1) != Carry0.getValue(0)) |
| return SDValue(); |
| |
  // The carry-in value must be on the right-hand side for subtraction.
| unsigned CarryInOperandNum = |
| Carry1.getOperand(0) == Carry0.getValue(0) ? 1 : 0; |
| if (Opcode == ISD::USUBO && CarryInOperandNum != 1) |
| return SDValue(); |
| SDValue CarryIn = Carry1.getOperand(CarryInOperandNum); |
| |
| unsigned NewOp = Opcode == ISD::UADDO ? ISD::UADDO_CARRY : ISD::USUBO_CARRY; |
| if (!TLI.isOperationLegalOrCustom(NewOp, Carry0.getValue(0).getValueType())) |
| return SDValue(); |
| |
| // Verify that the carry/borrow in is plausibly a carry/borrow bit. |
| CarryIn = getAsCarry(TLI, CarryIn, true); |
| if (!CarryIn) |
| return SDValue(); |
| |
| SDLoc DL(N); |
| CarryIn = DAG.getBoolExtOrTrunc(CarryIn, DL, Carry1->getValueType(1), |
| Carry1->getValueType(0)); |
| SDValue Merged = |
| DAG.getNode(NewOp, DL, Carry1->getVTList(), Carry0.getOperand(0), |
| Carry0.getOperand(1), CarryIn); |
| |
  // Note that because we have proven that the result of the UADDO/USUBO of A
  // and B feeds into the UADDO/USUBO that does the carry/borrow in, if the
  // first UADDO/USUBO overflows, the second UADDO/USUBO cannot. For example,
  // consider 8-bit numbers where 0xFF is the maximum value.
| // |
| // 0xFF + 0xFF == 0xFE with carry but 0xFE + 1 does not carry |
| // 0x00 - 0xFF == 1 with a carry/borrow but 1 - 1 == 0 (no carry/borrow) |
| // |
| // This is important because it means that OR and XOR can be used to merge |
| // carry flags; and that AND can return a constant zero. |
| // |
| // TODO: match other operations that can merge flags (ADD, etc) |
| DAG.ReplaceAllUsesOfValueWith(Carry1.getValue(0), Merged.getValue(0)); |
| if (N->getOpcode() == ISD::AND) |
| return DAG.getConstant(0, DL, CarryOutType); |
| return Merged.getValue(1); |
| } |
| |
| SDValue DAGCombiner::visitUADDO_CARRYLike(SDValue N0, SDValue N1, |
| SDValue CarryIn, SDNode *N) { |
| // fold (uaddo_carry (xor a, -1), b, c) -> (usubo_carry b, a, !c) and flip |
| // carry. |
| if (isBitwiseNot(N0)) |
| if (SDValue NotC = extractBooleanFlip(CarryIn, DAG, TLI, true)) { |
| SDLoc DL(N); |
| SDValue Sub = DAG.getNode(ISD::USUBO_CARRY, DL, N->getVTList(), N1, |
| N0.getOperand(0), NotC); |
| return CombineTo( |
| N, Sub, DAG.getLogicalNOT(DL, Sub.getValue(1), Sub->getValueType(1))); |
| } |
| |
| // Iff the flag result is dead: |
| // (uaddo_carry (add|uaddo X, Y), 0, Carry) -> (uaddo_carry X, Y, Carry) |
| // Don't do this if the Carry comes from the uaddo. It won't remove the uaddo |
| // or the dependency between the instructions. |
| if ((N0.getOpcode() == ISD::ADD || |
| (N0.getOpcode() == ISD::UADDO && N0.getResNo() == 0 && |
| N0.getValue(1) != CarryIn)) && |
| isNullConstant(N1) && !N->hasAnyUseOfValue(1)) |
| return DAG.getNode(ISD::UADDO_CARRY, SDLoc(N), N->getVTList(), |
| N0.getOperand(0), N0.getOperand(1), CarryIn); |
| |
| /** |
| * When one of the uaddo_carry argument is itself a carry, we may be facing |
| * a diamond carry propagation. In which case we try to transform the DAG |
| * to ensure linear carry propagation if that is possible. |
| */ |
| if (auto Y = getAsCarry(TLI, N1)) { |
| // Because both are carries, Y and Z can be swapped. |
| if (auto R = combineUADDO_CARRYDiamond(*this, DAG, N0, Y, CarryIn, N)) |
| return R; |
| if (auto R = combineUADDO_CARRYDiamond(*this, DAG, N0, CarryIn, Y, N)) |
| return R; |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSADDO_CARRYLike(SDValue N0, SDValue N1, |
| SDValue CarryIn, SDNode *N) { |
| // fold (saddo_carry (xor a, -1), b, c) -> (ssubo_carry b, a, !c) |
| if (isBitwiseNot(N0)) { |
| if (SDValue NotC = extractBooleanFlip(CarryIn, DAG, TLI, true)) |
| return DAG.getNode(ISD::SSUBO_CARRY, SDLoc(N), N->getVTList(), N1, |
| N0.getOperand(0), NotC); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSADDO_CARRY(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue CarryIn = N->getOperand(2); |
| SDLoc DL(N); |
| |
| // canonicalize constant to RHS |
| ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); |
| ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
| if (N0C && !N1C) |
| return DAG.getNode(ISD::SADDO_CARRY, DL, N->getVTList(), N1, N0, CarryIn); |
| |
| // fold (saddo_carry x, y, false) -> (saddo x, y) |
| if (isNullConstant(CarryIn)) { |
| if (!LegalOperations || |
| TLI.isOperationLegalOrCustom(ISD::SADDO, N->getValueType(0))) |
| return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0, N1); |
| } |
| |
| if (SDValue Combined = visitSADDO_CARRYLike(N0, N1, CarryIn, N)) |
| return Combined; |
| |
| if (SDValue Combined = visitSADDO_CARRYLike(N1, N0, CarryIn, N)) |
| return Combined; |
| |
| return SDValue(); |
| } |
| |
| // Attempt to create a USUBSAT(LHS, RHS) node with DstVT, performing a |
| // clamp/truncation if necessary. |
| static SDValue getTruncatedUSUBSAT(EVT DstVT, EVT SrcVT, SDValue LHS, |
| SDValue RHS, SelectionDAG &DAG, |
| const SDLoc &DL) { |
| assert(DstVT.getScalarSizeInBits() <= SrcVT.getScalarSizeInBits() && |
| "Illegal truncation"); |
| |
| if (DstVT == SrcVT) |
| return DAG.getNode(ISD::USUBSAT, DL, DstVT, LHS, RHS); |
| |
| // If the LHS is zero-extended then we can perform the USUBSAT as DstVT by |
| // clamping RHS. |
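  // Clamping is safe because, once LHS is known to fit in DstVT (verified
  // below), any RHS above the DstVT maximum already exceeds LHS and so
  // saturates the result to 0 with or without the clamp; otherwise the umin
  // is a no-op. Both operands then fit in DstVT and can be truncated.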
| APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(), |
| DstVT.getScalarSizeInBits()); |
| if (!DAG.MaskedValueIsZero(LHS, UpperBits)) |
| return SDValue(); |
| |
| SDValue SatLimit = |
| DAG.getConstant(APInt::getLowBitsSet(SrcVT.getScalarSizeInBits(), |
| DstVT.getScalarSizeInBits()), |
| DL, SrcVT); |
| RHS = DAG.getNode(ISD::UMIN, DL, SrcVT, RHS, SatLimit); |
| RHS = DAG.getNode(ISD::TRUNCATE, DL, DstVT, RHS); |
| LHS = DAG.getNode(ISD::TRUNCATE, DL, DstVT, LHS); |
| return DAG.getNode(ISD::USUBSAT, DL, DstVT, LHS, RHS); |
| } |
| |
| // Try to find umax(a,b) - b or a - umin(a,b) patterns that may be converted to |
| // usubsat(a,b), optionally as a truncated type. |
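// For unsigned values, umax(a,b) - b is a - b when a > b and 0 otherwise, and
// a - umin(a,b) behaves the same way; both therefore match the semantics of
// usubsat(a,b).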
| SDValue DAGCombiner::foldSubToUSubSat(EVT DstVT, SDNode *N, const SDLoc &DL) { |
| if (N->getOpcode() != ISD::SUB || |
| !(!LegalOperations || hasOperation(ISD::USUBSAT, DstVT))) |
| return SDValue(); |
| |
| EVT SubVT = N->getValueType(0); |
| SDValue Op0 = N->getOperand(0); |
| SDValue Op1 = N->getOperand(1); |
| |
  // Try to find umax(a,b) - b or a - umin(a,b) patterns
  // that may be converted to usubsat(a,b).
| if (Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) { |
| SDValue MaxLHS = Op0.getOperand(0); |
| SDValue MaxRHS = Op0.getOperand(1); |
| if (MaxLHS == Op1) |
| return getTruncatedUSUBSAT(DstVT, SubVT, MaxRHS, Op1, DAG, DL); |
| if (MaxRHS == Op1) |
| return getTruncatedUSUBSAT(DstVT, SubVT, MaxLHS, Op1, DAG, DL); |
| } |
| |
| if (Op1.getOpcode() == ISD::UMIN && Op1.hasOneUse()) { |
| SDValue MinLHS = Op1.getOperand(0); |
| SDValue MinRHS = Op1.getOperand(1); |
| if (MinLHS == Op0) |
| return getTruncatedUSUBSAT(DstVT, SubVT, Op0, MinRHS, DAG, DL); |
| if (MinRHS == Op0) |
| return getTruncatedUSUBSAT(DstVT, SubVT, Op0, MinLHS, DAG, DL); |
| } |
| |
| // sub(a,trunc(umin(zext(a),b))) -> usubsat(a,trunc(umin(b,SatLimit))) |
| if (Op1.getOpcode() == ISD::TRUNCATE && |
| Op1.getOperand(0).getOpcode() == ISD::UMIN && |
| Op1.getOperand(0).hasOneUse()) { |
| SDValue MinLHS = Op1.getOperand(0).getOperand(0); |
| SDValue MinRHS = Op1.getOperand(0).getOperand(1); |
| if (MinLHS.getOpcode() == ISD::ZERO_EXTEND && MinLHS.getOperand(0) == Op0) |
| return getTruncatedUSUBSAT(DstVT, MinLHS.getValueType(), MinLHS, MinRHS, |
| DAG, DL); |
| if (MinRHS.getOpcode() == ISD::ZERO_EXTEND && MinRHS.getOperand(0) == Op0) |
| return getTruncatedUSUBSAT(DstVT, MinLHS.getValueType(), MinRHS, MinLHS, |
| DAG, DL); |
| } |
| |
| return SDValue(); |
| } |
| |
| // Refinement of DAG/Type Legalisation (promotion) when CTLZ is used for |
// counting leading ones. Broadly, it replaces the subtraction with a left
| // shift. |
| // |
| // * DAG Legalisation Pattern: |
| // |
| // (sub (ctlz (zeroextend (not Src))) |
| // BitWidthDiff) |
| // |
| // if BitWidthDiff == BitWidth(Node) - BitWidth(Src) |
| // --> |
| // |
| // (ctlz_zero_undef (not (shl (anyextend Src) |
| // BitWidthDiff))) |
| // |
| // * Type Legalisation Pattern: |
| // |
| // (sub (ctlz (and (xor Src XorMask) |
| // AndMask)) |
| // BitWidthDiff) |
| // |
| // if AndMask has only trailing ones |
| // and MaskBitWidth(AndMask) == BitWidth(Node) - BitWidthDiff |
| // and XorMask has more trailing ones than AndMask |
| // --> |
| // |
| // (ctlz_zero_undef (not (shl Src BitWidthDiff))) |
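//
// For example, assuming an i8 Src promoted to i32 (BitWidthDiff == 24):
// ctlz(zext(not Src)) counts the 24 zero bits introduced by the extension
// plus the leading ones of Src, so subtracting 24 yields the leading-one
// count. After the rewrite, (anyextend Src) << 24 places Src in the top
// byte; its complement has the low 24 bits set, so it is never zero, which
// makes ctlz_zero_undef safe and the count identical.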
| template <class MatchContextClass> |
| static SDValue foldSubCtlzNot(SDNode *N, SelectionDAG &DAG) { |
| const SDLoc DL(N); |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N0.getValueType(); |
| unsigned BitWidth = VT.getScalarSizeInBits(); |
| |
| MatchContextClass Matcher(DAG, DAG.getTargetLoweringInfo(), N); |
| |
| APInt AndMask; |
| APInt XorMask; |
| APInt BitWidthDiff; |
| |
| SDValue CtlzOp; |
| SDValue Src; |
| |
| if (!sd_context_match( |
| N, Matcher, m_Sub(m_Ctlz(m_Value(CtlzOp)), m_ConstInt(BitWidthDiff)))) |
| return SDValue(); |
| |
| if (sd_context_match(CtlzOp, Matcher, m_ZExt(m_Not(m_Value(Src))))) { |
| // DAG Legalisation Pattern: |
    // (sub (ctlz (zero_extend (not Op))) BitWidthDiff)
| if ((BitWidth - Src.getValueType().getScalarSizeInBits()) != BitWidthDiff) |
| return SDValue(); |
| |
| Src = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Src); |
| } else if (sd_context_match(CtlzOp, Matcher, |
| m_And(m_Xor(m_Value(Src), m_ConstInt(XorMask)), |
| m_ConstInt(AndMask)))) { |
| // Type Legalisation Pattern: |
| // (sub (ctlz (and (xor Op XorMask) AndMask)) BitWidthDiff) |
| unsigned AndMaskWidth = BitWidth - BitWidthDiff.getZExtValue(); |
| if (!(AndMask.isMask(AndMaskWidth) && XorMask.countr_one() >= AndMaskWidth)) |
| return SDValue(); |
| } else |
| return SDValue(); |
| |
| SDValue ShiftConst = DAG.getShiftAmountConstant(BitWidthDiff, VT, DL); |
| SDValue LShift = Matcher.getNode(ISD::SHL, DL, VT, Src, ShiftConst); |
| SDValue Not = |
| Matcher.getNode(ISD::XOR, DL, VT, LShift, DAG.getAllOnesConstant(DL, VT)); |
| |
| return Matcher.getNode(ISD::CTLZ_ZERO_UNDEF, DL, VT, Not); |
| } |
| |
| // Fold sub(x, mul(divrem(x,y)[0], y)) to divrem(x, y)[1] |
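// This relies on the defining DIVREM identity x % y == x - (x / y) * y; the
// SHL case below handles the strength-reduced multiply when y is a known
// power of two.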
| static SDValue foldRemainderIdiom(SDNode *N, SelectionDAG &DAG, |
| const SDLoc &DL) { |
| assert(N->getOpcode() == ISD::SUB && "Node must be a SUB"); |
| SDValue Sub0 = N->getOperand(0); |
| SDValue Sub1 = N->getOperand(1); |
| |
| auto CheckAndFoldMulCase = [&](SDValue DivRem, SDValue MaybeY) -> SDValue { |
| if ((DivRem.getOpcode() == ISD::SDIVREM || |
| DivRem.getOpcode() == ISD::UDIVREM) && |
| DivRem.getResNo() == 0 && DivRem.getOperand(0) == Sub0 && |
| DivRem.getOperand(1) == MaybeY) { |
| return SDValue(DivRem.getNode(), 1); |
| } |
| return SDValue(); |
| }; |
| |
| if (Sub1.getOpcode() == ISD::MUL) { |
| // (sub x, (mul divrem(x,y)[0], y)) |
| SDValue Mul0 = Sub1.getOperand(0); |
| SDValue Mul1 = Sub1.getOperand(1); |
| |
| if (SDValue Res = CheckAndFoldMulCase(Mul0, Mul1)) |
| return Res; |
| |
| if (SDValue Res = CheckAndFoldMulCase(Mul1, Mul0)) |
| return Res; |
| |
| } else if (Sub1.getOpcode() == ISD::SHL) { |
| // Handle (sub x, (shl divrem(x,y)[0], C)) where y = 1 << C |
| SDValue Shl0 = Sub1.getOperand(0); |
| SDValue Shl1 = Sub1.getOperand(1); |
| // Check if Shl0 is divrem(x, Y)[0] |
| if ((Shl0.getOpcode() == ISD::SDIVREM || |
| Shl0.getOpcode() == ISD::UDIVREM) && |
| Shl0.getResNo() == 0 && Shl0.getOperand(0) == Sub0) { |
| |
| SDValue Divisor = Shl0.getOperand(1); |
| |
| ConstantSDNode *DivC = isConstOrConstSplat(Divisor); |
| ConstantSDNode *ShC = isConstOrConstSplat(Shl1); |
| if (!DivC || !ShC) |
| return SDValue(); |
| |
| if (DivC->getAPIntValue().isPowerOf2() && |
| DivC->getAPIntValue().logBase2() == ShC->getAPIntValue()) |
| return SDValue(Shl0.getNode(), 1); |
| } |
| } |
| return SDValue(); |
| } |
| |
// Since it may not be valid to emit a fold to zero for vector initializers,
// check if we can before folding.
| static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT, |
| SelectionDAG &DAG, bool LegalOperations) { |
| if (!VT.isVector()) |
| return DAG.getConstant(0, DL, VT); |
| if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) |
| return DAG.getConstant(0, DL, VT); |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSUB(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| unsigned BitWidth = VT.getScalarSizeInBits(); |
| SDLoc DL(N); |
| |
| auto PeekThroughFreeze = [](SDValue N) { |
| if (N->getOpcode() == ISD::FREEZE && N.hasOneUse()) |
| return N->getOperand(0); |
| return N; |
| }; |
| |
| if (SDValue V = foldSubCtlzNot<EmptyMatchContext>(N, DAG)) |
| return V; |
| |
| // fold (sub x, x) -> 0 |
| // FIXME: Refactor this and xor and other similar operations together. |
| if (PeekThroughFreeze(N0) == PeekThroughFreeze(N1)) |
| return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations); |
| |
| // fold (sub c1, c2) -> c3 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N0, N1})) |
| return C; |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (sub x, 0) -> x, vector edition |
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return N0; |
| } |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // fold (sub x, c) -> (add x, -c) |
| if (ConstantSDNode *N1C = getAsNonOpaqueConstant(N1)) |
| return DAG.getNode(ISD::ADD, DL, VT, N0, |
| DAG.getConstant(-N1C->getAPIntValue(), DL, VT)); |
| |
| if (isNullOrNullSplat(N0)) { |
| // Right-shifting everything out but the sign bit followed by negation is |
| // the same as flipping arithmetic/logical shift type without the negation: |
| // -(X >>u 31) -> (X >>s 31) |
| // -(X >>s 31) -> (X >>u 31) |
| if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) { |
| ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1)); |
| if (ShiftAmt && ShiftAmt->getAPIntValue() == (BitWidth - 1)) { |
| auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA; |
| if (!LegalOperations || TLI.isOperationLegal(NewSh, VT)) |
| return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1)); |
| } |
| } |
| |
| // 0 - X --> 0 if the sub is NUW. |
| if (N->getFlags().hasNoUnsignedWrap()) |
| return N0; |
| |
| if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) { |
| // N1 is either 0 or the minimum signed value. If the sub is NSW, then |
| // N1 must be 0 because negating the minimum signed value is undefined. |
| if (N->getFlags().hasNoSignedWrap()) |
| return N0; |
| |
| // 0 - X --> X if X is 0 or the minimum signed value. |
| return N1; |
| } |
| |
| // Convert 0 - abs(x). |
| if (N1.getOpcode() == ISD::ABS && N1.hasOneUse() && |
| !TLI.isOperationLegalOrCustom(ISD::ABS, VT)) |
| if (SDValue Result = TLI.expandABS(N1.getNode(), DAG, true)) |
| return Result; |
| |
| // Similar to the previous rule, but this time targeting an expanded abs. |
| // (sub 0, (max X, (sub 0, X))) --> (min X, (sub 0, X)) |
| // as well as |
| // (sub 0, (min X, (sub 0, X))) --> (max X, (sub 0, X)) |
| // Note that these two are applicable to both signed and unsigned min/max. |
| SDValue X; |
| SDValue S0; |
| auto NegPat = m_AllOf(m_Neg(m_Deferred(X)), m_Value(S0)); |
| if (sd_match(N1, m_OneUse(m_AnyOf(m_SMax(m_Value(X), NegPat), |
| m_UMax(m_Value(X), NegPat), |
| m_SMin(m_Value(X), NegPat), |
| m_UMin(m_Value(X), NegPat))))) { |
| unsigned NewOpc = ISD::getInverseMinMaxOpcode(N1->getOpcode()); |
| if (hasOperation(NewOpc, VT)) |
| return DAG.getNode(NewOpc, DL, VT, X, S0); |
| } |
| |
    // Fold neg(splat(neg(x))) -> splat(x)
| if (VT.isVector()) { |
| SDValue N1S = DAG.getSplatValue(N1, true); |
| if (N1S && N1S.getOpcode() == ISD::SUB && |
| isNullConstant(N1S.getOperand(0))) |
| return DAG.getSplat(VT, DL, N1S.getOperand(1)); |
| } |
| |
| // sub 0, (and x, 1) --> SIGN_EXTEND_INREG x, i1 |
| if (N1.getOpcode() == ISD::AND && N1.hasOneUse() && |
| isOneOrOneSplat(N1->getOperand(1))) { |
| EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), 1); |
| if (VT.isVector()) |
| ExtVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, |
| VT.getVectorElementCount()); |
| if (TLI.getOperationAction(ISD::SIGN_EXTEND_INREG, ExtVT) == |
| TargetLowering::Legal) { |
| return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N1->getOperand(0), |
| DAG.getValueType(ExtVT)); |
| } |
| } |
| } |
| |
| // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) |
| if (isAllOnesOrAllOnesSplat(N0)) |
| return DAG.getNode(ISD::XOR, DL, VT, N1, N0); |
| |
| // fold (A - (0-B)) -> A+B |
| if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0))) |
| return DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(1)); |
| |
| // fold A-(A-B) -> B |
| if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0)) |
| return N1.getOperand(1); |
| |
| // fold (A+B)-A -> B |
| if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1) |
| return N0.getOperand(1); |
| |
| // fold (A+B)-B -> A |
| if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1) |
| return N0.getOperand(0); |
| |
| // fold (A+C1)-C2 -> A+(C1-C2) |
| if (N0.getOpcode() == ISD::ADD) { |
| SDValue N01 = N0.getOperand(1); |
| if (SDValue NewC = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N01, N1})) |
| return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), NewC); |
| } |
| |
| // fold C2-(A+C1) -> (C2-C1)-A |
| if (N1.getOpcode() == ISD::ADD) { |
| SDValue N11 = N1.getOperand(1); |
| if (SDValue NewC = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N0, N11})) |
| return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0)); |
| } |
| |
| // fold (A-C1)-C2 -> A-(C1+C2) |
| if (N0.getOpcode() == ISD::SUB) { |
| SDValue N01 = N0.getOperand(1); |
| if (SDValue NewC = DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, {N01, N1})) |
| return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), NewC); |
| } |
| |
| // fold (c1-A)-c2 -> (c1-c2)-A |
| if (N0.getOpcode() == ISD::SUB) { |
| SDValue N00 = N0.getOperand(0); |
| if (SDValue NewC = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {N00, N1})) |
| return DAG.getNode(ISD::SUB, DL, VT, NewC, N0.getOperand(1)); |
| } |
| |
| SDValue A, B, C; |
| |
| // fold ((A+(B+C))-B) -> A+C |
| if (sd_match(N0, m_Add(m_Value(A), m_Add(m_Specific(N1), m_Value(C))))) |
| return DAG.getNode(ISD::ADD, DL, VT, A, C); |
| |
| // fold ((A+(B-C))-B) -> A-C |
| if (sd_match(N0, m_Add(m_Value(A), m_Sub(m_Specific(N1), m_Value(C))))) |
| return DAG.getNode(ISD::SUB, DL, VT, A, C); |
| |
| // fold ((A-(B-C))-C) -> A-B |
| if (sd_match(N0, m_Sub(m_Value(A), m_Sub(m_Value(B), m_Specific(N1))))) |
| return DAG.getNode(ISD::SUB, DL, VT, A, B); |
| |
| // fold (A-(B-C)) -> A+(C-B) |
| if (sd_match(N1, m_OneUse(m_Sub(m_Value(B), m_Value(C))))) |
| return DAG.getNode(ISD::ADD, DL, VT, N0, |
| DAG.getNode(ISD::SUB, DL, VT, C, B)); |
| |
| // A - (A & B) -> A & (~B) |
| if (sd_match(N1, m_And(m_Specific(N0), m_Value(B))) && |
| (N1.hasOneUse() || isConstantOrConstantVector(B, /*NoOpaques=*/true))) |
| return DAG.getNode(ISD::AND, DL, VT, N0, DAG.getNOT(DL, B, VT)); |
| |
| // fold (A - (-B * C)) -> (A + (B * C)) |
| if (sd_match(N1, m_OneUse(m_Mul(m_Neg(m_Value(B)), m_Value(C))))) |
| return DAG.getNode(ISD::ADD, DL, VT, N0, |
| DAG.getNode(ISD::MUL, DL, VT, B, C)); |
| |
| // If either operand of a sub is undef, the result is undef |
| if (N0.isUndef()) |
| return N0; |
| if (N1.isUndef()) |
| return N1; |
| |
| if (SDValue V = foldAddSubBoolOfMaskedVal(N, DL, DAG)) |
| return V; |
| |
| if (SDValue V = foldAddSubOfSignBit(N, DL, DAG)) |
| return V; |
| |
| // Try to match AVGCEIL fixedwidth pattern |
| if (SDValue V = foldSubToAvg(N, DL)) |
| return V; |
| |
| if (SDValue V = foldAddSubMasked1(false, N0, N1, DAG, DL)) |
| return V; |
| |
| if (SDValue V = foldSubToUSubSat(VT, N, DL)) |
| return V; |
| |
| if (SDValue V = foldRemainderIdiom(N, DAG, DL)) |
| return V; |
| |
| // (A - B) - 1 -> add (xor B, -1), A |
| if (sd_match(N, m_Sub(m_OneUse(m_Sub(m_Value(A), m_Value(B))), m_One()))) |
| return DAG.getNode(ISD::ADD, DL, VT, A, DAG.getNOT(DL, B, VT)); |
| |
| // Look for: |
| // sub y, (xor x, -1) |
| // And if the target does not like this form then turn into: |
| // add (add x, y), 1 |
| if (TLI.preferIncOfAddToSubOfNot(VT) && N1.hasOneUse() && isBitwiseNot(N1)) { |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, N1.getOperand(0)); |
| return DAG.getNode(ISD::ADD, DL, VT, Add, DAG.getConstant(1, DL, VT)); |
| } |
| |
| // Hoist one-use addition by non-opaque constant: |
| // (x + C) - y -> (x - y) + C |
| if (!reassociationCanBreakAddressingModePattern(ISD::SUB, DL, N, N0, N1) && |
| N0.getOpcode() == ISD::ADD && N0.hasOneUse() && |
| isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) { |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1); |
| return DAG.getNode(ISD::ADD, DL, VT, Sub, N0.getOperand(1)); |
| } |
| // y - (x + C) -> (y - x) - C |
| if (N1.getOpcode() == ISD::ADD && N1.hasOneUse() && |
| isConstantOrConstantVector(N1.getOperand(1), /*NoOpaques=*/true)) { |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(0)); |
| return DAG.getNode(ISD::SUB, DL, VT, Sub, N1.getOperand(1)); |
| } |
| // (x - C) - y -> (x - y) - C |
| // This is necessary because SUB(X,C) -> ADD(X,-C) doesn't work for vectors. |
| if (N0.getOpcode() == ISD::SUB && N0.hasOneUse() && |
| isConstantOrConstantVector(N0.getOperand(1), /*NoOpaques=*/true)) { |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), N1); |
| return DAG.getNode(ISD::SUB, DL, VT, Sub, N0.getOperand(1)); |
| } |
| // (C - x) - y -> C - (x + y) |
| if (N0.getOpcode() == ISD::SUB && N0.hasOneUse() && |
| isConstantOrConstantVector(N0.getOperand(0), /*NoOpaques=*/true)) { |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1), N1); |
| return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), Add); |
| } |
| |
| // If the target's bool is represented as 0/-1, prefer to make this 'add 0/-1' |
| // rather than 'sub 0/1' (the sext should get folded). |
| // sub X, (zext i1 Y) --> add X, (sext i1 Y) |
| if (N1.getOpcode() == ISD::ZERO_EXTEND && |
| N1.getOperand(0).getScalarValueSizeInBits() == 1 && |
| TLI.getBooleanContents(VT) == |
| TargetLowering::ZeroOrNegativeOneBooleanContent) { |
| SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N1.getOperand(0)); |
| return DAG.getNode(ISD::ADD, DL, VT, N0, SExt); |
| } |
| |
| // fold B = sra (A, size(A)-1); sub (xor (A, B), B) -> (abs A) |
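  // This is the classic branchless abs expansion: B is the sign bit of A
  // splatted across the value (all zeros or all ones), and (A ^ B) - B
  // conditionally negates A exactly when A is negative.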
| if ((!LegalOperations || hasOperation(ISD::ABS, VT)) && |
| sd_match(N1, m_Sra(m_Value(A), m_SpecificInt(BitWidth - 1))) && |
| sd_match(N0, m_Xor(m_Specific(A), m_Specific(N1)))) |
| return DAG.getNode(ISD::ABS, DL, VT, A); |
| |
| // If the relocation model supports it, consider symbol offsets. |
| if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) |
| if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) { |
| // fold (sub Sym+c1, Sym+c2) -> c1-c2 |
| if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1)) |
| if (GA->getGlobal() == GB->getGlobal()) |
| return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(), |
| DL, VT); |
| } |
| |
| // sub X, (sextinreg Y i1) -> add X, (and Y 1) |
| if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { |
| VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); |
| if (TN->getVT() == MVT::i1) { |
| SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), |
| DAG.getConstant(1, DL, VT)); |
| return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt); |
| } |
| } |
| |
| // canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C)) |
| if (N1.getOpcode() == ISD::VSCALE && N1.hasOneUse()) { |
| const APInt &IntVal = N1.getConstantOperandAPInt(0); |
| return DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getVScale(DL, VT, -IntVal)); |
| } |
| |
| // canonicalize (sub X, step_vector(C)) to (add X, step_vector(-C)) |
| if (N1.getOpcode() == ISD::STEP_VECTOR && N1.hasOneUse()) { |
| APInt NewStep = -N1.getConstantOperandAPInt(0); |
| return DAG.getNode(ISD::ADD, DL, VT, N0, |
| DAG.getStepVector(DL, VT, NewStep)); |
| } |
| |
| // Prefer an add for more folding potential and possibly better codegen: |
| // sub N0, (lshr N10, width-1) --> add N0, (ashr N10, width-1) |
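  // This is sound because (lshr N10, width-1) is 0 or 1 while
  // (ashr N10, width-1) is 0 or -1 for the same operand, and subtracting 0/1
  // equals adding 0/-1.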
| if (!LegalOperations && N1.getOpcode() == ISD::SRL && N1.hasOneUse()) { |
| SDValue ShAmt = N1.getOperand(1); |
| ConstantSDNode *ShAmtC = isConstOrConstSplat(ShAmt); |
| if (ShAmtC && ShAmtC->getAPIntValue() == (BitWidth - 1)) { |
| SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, N1.getOperand(0), ShAmt); |
| return DAG.getNode(ISD::ADD, DL, VT, N0, SRA); |
| } |
| } |
| |
| // As with the previous fold, prefer add for more folding potential. |
| // Subtracting SMIN/0 is the same as adding SMIN/0: |
| // N0 - (X << BW-1) --> N0 + (X << BW-1) |
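  // (X << BW-1) is either 0 or the minimum signed value, and the latter is
  // its own two's-complement negation, so the subtraction and the addition
  // agree.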
| if (N1.getOpcode() == ISD::SHL) { |
| ConstantSDNode *ShlC = isConstOrConstSplat(N1.getOperand(1)); |
| if (ShlC && ShlC->getAPIntValue() == (BitWidth - 1)) |
| return DAG.getNode(ISD::ADD, DL, VT, N1, N0); |
| } |
| |
| // (sub (usubo_carry X, 0, Carry), Y) -> (usubo_carry X, Y, Carry) |
| if (N0.getOpcode() == ISD::USUBO_CARRY && isNullConstant(N0.getOperand(1)) && |
| N0.getResNo() == 0 && N0.hasOneUse()) |
| return DAG.getNode(ISD::USUBO_CARRY, DL, N0->getVTList(), |
| N0.getOperand(0), N1, N0.getOperand(2)); |
| |
| if (TLI.isOperationLegalOrCustom(ISD::UADDO_CARRY, VT)) { |
| // (sub Carry, X) -> (uaddo_carry (sub 0, X), 0, Carry) |
| if (SDValue Carry = getAsCarry(TLI, N0)) { |
| SDValue X = N1; |
| SDValue Zero = DAG.getConstant(0, DL, VT); |
| SDValue NegX = DAG.getNode(ISD::SUB, DL, VT, Zero, X); |
| return DAG.getNode(ISD::UADDO_CARRY, DL, |
| DAG.getVTList(VT, Carry.getValueType()), NegX, Zero, |
| Carry); |
| } |
| } |
| |
| // If there's no chance of borrowing from adjacent bits, then sub is xor: |
| // sub C0, X --> xor X, C0 |
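  // For example, with C0 == 0x0f and X known to fit in the low 4 bits,
  // 0x0f - X == 0x0f ^ X because no bit position can borrow.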
| if (ConstantSDNode *C0 = isConstOrConstSplat(N0)) { |
| if (!C0->isOpaque()) { |
| const APInt &C0Val = C0->getAPIntValue(); |
| const APInt &MaybeOnes = ~DAG.computeKnownBits(N1).Zero; |
| if ((C0Val - MaybeOnes) == (C0Val ^ MaybeOnes)) |
| return DAG.getNode(ISD::XOR, DL, VT, N1, N0); |
| } |
| } |
| |
| // smax(a,b) - smin(a,b) --> abds(a,b) |
| if ((!LegalOperations || hasOperation(ISD::ABDS, VT)) && |
| sd_match(N0, m_SMaxLike(m_Value(A), m_Value(B))) && |
| sd_match(N1, m_SMinLike(m_Specific(A), m_Specific(B)))) |
| return DAG.getNode(ISD::ABDS, DL, VT, A, B); |
| |
| // smin(a,b) - smax(a,b) --> neg(abds(a,b)) |
| if (hasOperation(ISD::ABDS, VT) && |
| sd_match(N0, m_SMinLike(m_Value(A), m_Value(B))) && |
| sd_match(N1, m_SMaxLike(m_Specific(A), m_Specific(B)))) |
| return DAG.getNegative(DAG.getNode(ISD::ABDS, DL, VT, A, B), DL, VT); |
| |
| // umax(a,b) - umin(a,b) --> abdu(a,b) |
| if ((!LegalOperations || hasOperation(ISD::ABDU, VT)) && |
| sd_match(N0, m_UMaxLike(m_Value(A), m_Value(B))) && |
| sd_match(N1, m_UMinLike(m_Specific(A), m_Specific(B)))) |
| return DAG.getNode(ISD::ABDU, DL, VT, A, B); |
| |
| // umin(a,b) - umax(a,b) --> neg(abdu(a,b)) |
| if (hasOperation(ISD::ABDU, VT) && |
| sd_match(N0, m_UMinLike(m_Value(A), m_Value(B))) && |
| sd_match(N1, m_UMaxLike(m_Specific(A), m_Specific(B)))) |
| return DAG.getNegative(DAG.getNode(ISD::ABDU, DL, VT, A, B), DL, VT); |
| |
| // (sub x, (select (ult x, y), 0, y)) -> (umin x, (sub x, y)) |
| // (sub x, (select (uge x, y), y, 0)) -> (umin x, (sub x, y)) |
| if (hasUMin(VT)) { |
| SDValue Y; |
| if (sd_match(N1, m_OneUse(m_Select(m_SetCC(m_Specific(N0), m_Value(Y), |
| m_SpecificCondCode(ISD::SETULT)), |
| m_Zero(), m_Deferred(Y)))) || |
| sd_match(N1, m_OneUse(m_Select(m_SetCC(m_Specific(N0), m_Value(Y), |
| m_SpecificCondCode(ISD::SETUGE)), |
| m_Deferred(Y), m_Zero())))) |
| return DAG.getNode(ISD::UMIN, DL, VT, N0, |
| DAG.getNode(ISD::SUB, DL, VT, N0, Y)); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSUBSAT(SDNode *N) { |
| unsigned Opcode = N->getOpcode(); |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| bool IsSigned = Opcode == ISD::SSUBSAT; |
| SDLoc DL(N); |
| |
| // fold (sub_sat x, undef) -> 0 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (sub_sat x, x) -> 0 |
| if (N0 == N1) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (sub_sat c1, c2) -> c3 |
| if (SDValue C = DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1})) |
| return C; |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (sub_sat x, 0) -> x, vector edition |
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return N0; |
| } |
| |
| // fold (sub_sat x, 0) -> x |
| if (isNullConstant(N1)) |
| return N0; |
| |
  // If it cannot overflow, transform into a sub.
| if (DAG.willNotOverflowSub(IsSigned, N0, N1)) |
| return DAG.getNode(ISD::SUB, DL, VT, N0, N1); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSUBC(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| SDLoc DL(N); |
| |
  // If the flag result is dead, turn this into a SUB.
| if (!N->hasAnyUseOfValue(1)) |
| return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1), |
| DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); |
| |
| // fold (subc x, x) -> 0 + no borrow |
| if (N0 == N1) |
| return CombineTo(N, DAG.getConstant(0, DL, VT), |
| DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); |
| |
| // fold (subc x, 0) -> x + no borrow |
| if (isNullConstant(N1)) |
| return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); |
| |
| // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow |
| if (isAllOnesConstant(N0)) |
| return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0), |
| DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSUBO(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| bool IsSigned = (ISD::SSUBO == N->getOpcode()); |
| |
| EVT CarryVT = N->getValueType(1); |
| SDLoc DL(N); |
| |
  // If the flag result is dead, turn this into a SUB.
| if (!N->hasAnyUseOfValue(1)) |
| return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1), |
| DAG.getUNDEF(CarryVT)); |
| |
| // fold (subo x, x) -> 0 + no borrow |
| if (N0 == N1) |
| return CombineTo(N, DAG.getConstant(0, DL, VT), |
| DAG.getConstant(0, DL, CarryVT)); |
| |
  // fold (subo x, c) -> (addo x, -c)
| if (ConstantSDNode *N1C = getAsNonOpaqueConstant(N1)) |
| if (IsSigned && !N1C->isMinSignedValue()) |
| return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N0, |
| DAG.getConstant(-N1C->getAPIntValue(), DL, VT)); |
| |
| // fold (subo x, 0) -> x + no borrow |
| if (isNullOrNullSplat(N1)) |
| return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT)); |
| |
  // If it cannot overflow, transform into a sub.
| if (DAG.willNotOverflowSub(IsSigned, N0, N1)) |
| return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1), |
| DAG.getConstant(0, DL, CarryVT)); |
| |
| // Canonicalize (usubo -1, x) -> ~x, i.e. (xor x, -1) + no borrow |
| if (!IsSigned && isAllOnesOrAllOnesSplat(N0)) |
| return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0), |
| DAG.getConstant(0, DL, CarryVT)); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSUBE(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue CarryIn = N->getOperand(2); |
| |
| // fold (sube x, y, false) -> (subc x, y) |
| if (CarryIn.getOpcode() == ISD::CARRY_FALSE) |
| return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitUSUBO_CARRY(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue CarryIn = N->getOperand(2); |
| |
| // fold (usubo_carry x, y, false) -> (usubo x, y) |
| if (isNullConstant(CarryIn)) { |
| if (!LegalOperations || |
| TLI.isOperationLegalOrCustom(ISD::USUBO, N->getValueType(0))) |
| return DAG.getNode(ISD::USUBO, SDLoc(N), N->getVTList(), N0, N1); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSSUBO_CARRY(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue CarryIn = N->getOperand(2); |
| |
| // fold (ssubo_carry x, y, false) -> (ssubo x, y) |
| if (isNullConstant(CarryIn)) { |
| if (!LegalOperations || |
| TLI.isOperationLegalOrCustom(ISD::SSUBO, N->getValueType(0))) |
| return DAG.getNode(ISD::SSUBO, SDLoc(N), N->getVTList(), N0, N1); |
| } |
| |
| return SDValue(); |
| } |
| |
| // Notice that "mulfix" can be any of SMULFIX, SMULFIXSAT, UMULFIX and |
| // UMULFIXSAT here. |
| SDValue DAGCombiner::visitMULFIX(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue Scale = N->getOperand(2); |
| EVT VT = N0.getValueType(); |
| |
| // fold (mulfix x, undef, scale) -> 0 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getConstant(0, SDLoc(N), VT); |
| |
| // Canonicalize constant to RHS (vector doesn't have to splat) |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0, Scale); |
| |
| // fold (mulfix x, 0, scale) -> 0 |
| if (isNullConstant(N1)) |
| return DAG.getConstant(0, SDLoc(N), VT); |
| |
| return SDValue(); |
| } |
| |
| template <class MatchContextClass> SDValue DAGCombiner::visitMUL(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| unsigned BitWidth = VT.getScalarSizeInBits(); |
| SDLoc DL(N); |
| bool UseVP = std::is_same_v<MatchContextClass, VPMatchContext>; |
| MatchContextClass Matcher(DAG, TLI, N); |
| |
| // fold (mul x, undef) -> 0 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (mul c1, c2) -> c1*c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::MUL, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS (vector doesn't have to splat) |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return Matcher.getNode(ISD::MUL, DL, VT, N1, N0); |
| |
| bool N1IsConst = false; |
| bool N1IsOpaqueConst = false; |
| APInt ConstValue1; |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| // TODO: Change this to use SimplifyVBinOp when it supports VP op. |
| if (!UseVP) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1); |
| assert((!N1IsConst || ConstValue1.getBitWidth() == BitWidth) && |
| "Splat APInt should be element width"); |
| } else { |
| N1IsConst = isa<ConstantSDNode>(N1); |
| if (N1IsConst) { |
| ConstValue1 = N1->getAsAPIntVal(); |
| N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque(); |
| } |
| } |
| |
| // fold (mul x, 0) -> 0 |
| if (N1IsConst && ConstValue1.isZero()) |
| return N1; |
| |
| // fold (mul x, 1) -> x |
| if (N1IsConst && ConstValue1.isOne()) |
| return N0; |
| |
| if (!UseVP) |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // fold (mul x, -1) -> 0-x |
| if (N1IsConst && ConstValue1.isAllOnes()) |
| return Matcher.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), N0); |
| |
| // fold (mul x, (1 << c)) -> x << c |
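  // e.g. (mul x, 8) --> (shl x, 3)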
| if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) && |
| (!VT.isVector() || Level <= AfterLegalizeVectorOps)) { |
| if (SDValue LogBase2 = BuildLogBase2(N1, DL)) { |
| EVT ShiftVT = getShiftAmountTy(N0.getValueType()); |
| SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT); |
| return Matcher.getNode(ISD::SHL, DL, VT, N0, Trunc); |
| } |
| } |
| |
| // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c |
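  // e.g. (mul x, -4) --> (sub 0, (shl x, 2))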
| if (N1IsConst && !N1IsOpaqueConst && ConstValue1.isNegatedPowerOf2()) { |
| unsigned Log2Val = (-ConstValue1).logBase2(); |
| |
| // FIXME: If the input is something that is easily negated (e.g. a |
| // single-use add), we should put the negate there. |
| return Matcher.getNode( |
| ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), |
| Matcher.getNode(ISD::SHL, DL, VT, N0, |
| DAG.getShiftAmountConstant(Log2Val, VT, DL))); |
| } |
| |
| // Attempt to reuse an existing umul_lohi/smul_lohi node, but only if the |
| // hi result is in use in case we hit this mid-legalization. |
| if (!UseVP) { |
| for (unsigned LoHiOpc : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) { |
| if (!LegalOperations || TLI.isOperationLegalOrCustom(LoHiOpc, VT)) { |
| SDVTList LoHiVT = DAG.getVTList(VT, VT); |
| // TODO: Can we match commutable operands with getNodeIfExists? |
| if (SDNode *LoHi = DAG.getNodeIfExists(LoHiOpc, LoHiVT, {N0, N1})) |
| if (LoHi->hasAnyUseOfValue(1)) |
| return SDValue(LoHi, 0); |
| if (SDNode *LoHi = DAG.getNodeIfExists(LoHiOpc, LoHiVT, {N1, N0})) |
| if (LoHi->hasAnyUseOfValue(1)) |
| return SDValue(LoHi, 0); |
| } |
| } |
| } |
| |
| // Try to transform: |
| // (1) multiply-by-(power-of-2 +/- 1) into shift and add/sub. |
| // mul x, (2^N + 1) --> add (shl x, N), x |
| // mul x, (2^N - 1) --> sub (shl x, N), x |
| // Examples: x * 33 --> (x << 5) + x |
| // x * 15 --> (x << 4) - x |
| // x * -33 --> -((x << 5) + x) |
| // x * -15 --> -((x << 4) - x) ; this reduces --> x - (x << 4) |
| // (2) multiply-by-(power-of-2 +/- power-of-2) into shifts and add/sub. |
| // mul x, (2^N + 2^M) --> (add (shl x, N), (shl x, M)) |
| // mul x, (2^N - 2^M) --> (sub (shl x, N), (shl x, M)) |
| // Examples: x * 0x8800 --> (x << 15) + (x << 11) |
| // x * 0xf800 --> (x << 16) - (x << 11) |
| // x * -0x8800 --> -((x << 15) + (x << 11)) |
  //           x * -0xf800 --> -((x << 16) - (x << 11)) ; this reduces --> (x << 11) - (x << 16)
| if (!UseVP && N1IsConst && |
| TLI.decomposeMulByConstant(*DAG.getContext(), VT, N1)) { |
| // TODO: We could handle more general decomposition of any constant by |
| // having the target set a limit on number of ops and making a |
| // callback to determine that sequence (similar to sqrt expansion). |
| unsigned MathOp = ISD::DELETED_NODE; |
| APInt MulC = ConstValue1.abs(); |
| // The constant `2` should be treated as (2^0 + 1). |
| unsigned TZeros = MulC == 2 ? 0 : MulC.countr_zero(); |
| MulC.lshrInPlace(TZeros); |
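    // e.g. MulC = 20 = 0b10100: TZeros = 2 and MulC shifts down to 5 = 4 + 1,
    // so MathOp = ISD::ADD with ShAmt = 2 + 2 = 4, producing
    // x * 20 --> (x << 4) + (x << 2).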
| if ((MulC - 1).isPowerOf2()) |
| MathOp = ISD::ADD; |
| else if ((MulC + 1).isPowerOf2()) |
| MathOp = ISD::SUB; |
| |
| if (MathOp != ISD::DELETED_NODE) { |
| unsigned ShAmt = |
| MathOp == ISD::ADD ? (MulC - 1).logBase2() : (MulC + 1).logBase2(); |
| ShAmt += TZeros; |
| assert(ShAmt < BitWidth && |
| "multiply-by-constant generated out of bounds shift"); |
| SDValue Shl = |
| DAG.getNode(ISD::SHL, DL, VT, N0, DAG.getConstant(ShAmt, DL, VT)); |
| SDValue R = |
| TZeros ? DAG.getNode(MathOp, DL, VT, Shl, |
| DAG.getNode(ISD::SHL, DL, VT, N0, |
| DAG.getConstant(TZeros, DL, VT))) |
| : DAG.getNode(MathOp, DL, VT, Shl, N0); |
| if (ConstValue1.isNegative()) |
| R = DAG.getNegative(R, DL, VT); |
| return R; |
| } |
| } |
| |
| // (mul (shl X, c1), c2) -> (mul X, c2 << c1) |
| if (sd_context_match(N0, Matcher, m_Opc(ISD::SHL))) { |
| SDValue N01 = N0.getOperand(1); |
| if (SDValue C3 = DAG.FoldConstantArithmetic(ISD::SHL, DL, VT, {N1, N01})) |
| return DAG.getNode(ISD::MUL, DL, VT, N0.getOperand(0), C3); |
| } |
| |
| // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one |
| // use. |
| { |
| SDValue Sh, Y; |
| |
| // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)). |
| if (sd_context_match(N0, Matcher, m_OneUse(m_Opc(ISD::SHL))) && |
| isConstantOrConstantVector(N0.getOperand(1))) { |
| Sh = N0; Y = N1; |
| } else if (sd_context_match(N1, Matcher, m_OneUse(m_Opc(ISD::SHL))) && |
| isConstantOrConstantVector(N1.getOperand(1))) { |
| Sh = N1; Y = N0; |
| } |
| |
| if (Sh.getNode()) { |
| SDValue Mul = Matcher.getNode(ISD::MUL, DL, VT, Sh.getOperand(0), Y); |
| return Matcher.getNode(ISD::SHL, DL, VT, Mul, Sh.getOperand(1)); |
| } |
| } |
| |
| // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2) |
| if (sd_context_match(N0, Matcher, m_Opc(ISD::ADD)) && |
| DAG.isConstantIntBuildVectorOrConstantInt(N1) && |
| DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) && |
| isMulAddWithConstProfitable(N, N0, N1)) |
| return Matcher.getNode( |
| ISD::ADD, DL, VT, |
| Matcher.getNode(ISD::MUL, SDLoc(N0), VT, N0.getOperand(0), N1), |
| Matcher.getNode(ISD::MUL, SDLoc(N1), VT, N0.getOperand(1), N1)); |
| |
| // Fold (mul (vscale * C0), C1) to (vscale * (C0 * C1)). |
| ConstantSDNode *NC1 = isConstOrConstSplat(N1); |
| if (!UseVP && N0.getOpcode() == ISD::VSCALE && NC1) { |
| const APInt &C0 = N0.getConstantOperandAPInt(0); |
| const APInt &C1 = NC1->getAPIntValue(); |
| return DAG.getVScale(DL, VT, C0 * C1); |
| } |
| |
| // Fold (mul step_vector(C0), C1) to (step_vector(C0 * C1)). |
| APInt MulVal; |
| if (!UseVP && N0.getOpcode() == ISD::STEP_VECTOR && |
| ISD::isConstantSplatVector(N1.getNode(), MulVal)) { |
| const APInt &C0 = N0.getConstantOperandAPInt(0); |
| APInt NewStep = C0 * MulVal; |
| return DAG.getStepVector(DL, VT, NewStep); |
| } |
| |
| // Fold Y = sra (X, size(X)-1); mul (or (Y, 1), X) -> (abs X) |
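  // (or Y, 1) is +1 when X is non-negative and -1 when X is negative, so the
  // product is X or 0-X, i.e. abs(X).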
| SDValue X; |
| if (!UseVP && (!LegalOperations || hasOperation(ISD::ABS, VT)) && |
| sd_context_match( |
| N, Matcher, |
| m_Mul(m_Or(m_Sra(m_Value(X), m_SpecificInt(BitWidth - 1)), m_One()), |
| m_Deferred(X)))) { |
| return Matcher.getNode(ISD::ABS, DL, VT, X); |
| } |
| |
  // Fold per-element (mul x, 0/undef) -> 0 and (mul x, 1) -> x
  //  -> and(x, mask)
| // We can replace vectors with '0' and '1' factors with a clearing mask. |
| if (VT.isFixedLengthVector()) { |
| unsigned NumElts = VT.getVectorNumElements(); |
| SmallBitVector ClearMask; |
| ClearMask.reserve(NumElts); |
| auto IsClearMask = [&ClearMask](ConstantSDNode *V) { |
| if (!V || V->isZero()) { |
| ClearMask.push_back(true); |
| return true; |
| } |
| ClearMask.push_back(false); |
| return V->isOne(); |
| }; |
| if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::AND, VT)) && |
| ISD::matchUnaryPredicate(N1, IsClearMask, /*AllowUndefs*/ true)) { |
| assert(N1.getOpcode() == ISD::BUILD_VECTOR && "Unknown constant vector"); |
| EVT LegalSVT = N1.getOperand(0).getValueType(); |
| SDValue Zero = DAG.getConstant(0, DL, LegalSVT); |
| SDValue AllOnes = DAG.getAllOnesConstant(DL, LegalSVT); |
| SmallVector<SDValue, 16> Mask(NumElts, AllOnes); |
| for (unsigned I = 0; I != NumElts; ++I) |
| if (ClearMask[I]) |
| Mask[I] = Zero; |
| return DAG.getNode(ISD::AND, DL, VT, N0, DAG.getBuildVector(VT, DL, Mask)); |
| } |
| } |
| |
| // reassociate mul |
| // TODO: Change reassociateOps to support vp ops. |
| if (!UseVP) |
| if (SDValue RMUL = reassociateOps(ISD::MUL, DL, N0, N1, N->getFlags())) |
| return RMUL; |
| |
| // Fold mul(vecreduce(x), vecreduce(y)) -> vecreduce(mul(x, y)) |
| // TODO: Change reassociateReduction to support vp ops. |
| if (!UseVP) |
| if (SDValue SD = |
| reassociateReduction(ISD::VECREDUCE_MUL, ISD::MUL, DL, VT, N0, N1)) |
| return SD; |
| |
| // Simplify the operands using demanded-bits information. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| return SDValue(); |
| } |
| |
/// Return true if a divmod libcall is available for the node's value type.
| static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, |
| const TargetLowering &TLI) { |
| RTLIB::Libcall LC; |
| EVT NodeType = Node->getValueType(0); |
| if (!NodeType.isSimple()) |
| return false; |
| switch (NodeType.getSimpleVT().SimpleTy) { |
| default: return false; // No libcall for vector types. |
| case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; |
| case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; |
| case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; |
| case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; |
| case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; |
| } |
| |
| return TLI.getLibcallName(LC) != nullptr; |
| } |
| |
| /// Issue divrem if both quotient and remainder are needed. |
| SDValue DAGCombiner::useDivRem(SDNode *Node) { |
| if (Node->use_empty()) |
| return SDValue(); // This is a dead node, leave it alone. |
| |
| unsigned Opcode = Node->getOpcode(); |
| bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM); |
| unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; |
| |
  // DivMod libcalls can still work on non-legal scalar integer types.
| EVT VT = Node->getValueType(0); |
| if (VT.isVector() || !VT.isInteger()) |
| return SDValue(); |
| |
| if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT)) |
| return SDValue(); |
| |
| // If DIVREM is going to get expanded into a libcall, |
| // but there is no libcall available, then don't combine. |
| if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) && |
| !isDivRemLibcallAvailable(Node, isSigned, TLI)) |
| return SDValue(); |
| |
  // If div is legal, it's better to do the normal expansion.
| unsigned OtherOpcode = 0; |
| if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) { |
| OtherOpcode = isSigned ? ISD::SREM : ISD::UREM; |
| if (TLI.isOperationLegalOrCustom(Opcode, VT)) |
| return SDValue(); |
| } else { |
| OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
| if (TLI.isOperationLegalOrCustom(OtherOpcode, VT)) |
| return SDValue(); |
| } |
| |
| SDValue Op0 = Node->getOperand(0); |
| SDValue Op1 = Node->getOperand(1); |
| SDValue combined; |
| for (SDNode *User : Op0->users()) { |
| if (User == Node || User->getOpcode() == ISD::DELETED_NODE || |
| User->use_empty()) |
| continue; |
| // Convert the other matching node(s), too; |
| // otherwise, the DIVREM may get target-legalized into something |
| // target-specific that we won't be able to recognize. |
| unsigned UserOpc = User->getOpcode(); |
| if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) && |
| User->getOperand(0) == Op0 && |
| User->getOperand(1) == Op1) { |
| if (!combined) { |
| if (UserOpc == OtherOpcode) { |
| SDVTList VTs = DAG.getVTList(VT, VT); |
| combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1); |
| } else if (UserOpc == DivRemOpc) { |
| combined = SDValue(User, 0); |
| } else { |
| assert(UserOpc == Opcode); |
| continue; |
| } |
| } |
| if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV) |
| CombineTo(User, combined); |
| else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM) |
| CombineTo(User, combined.getValue(1)); |
| } |
| } |
| return combined; |
| } |
| |
| static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| unsigned Opc = N->getOpcode(); |
| bool IsDiv = (ISD::SDIV == Opc) || (ISD::UDIV == Opc); |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| |
| // X / undef -> undef |
| // X % undef -> undef |
| // X / 0 -> undef |
| // X % 0 -> undef |
| // NOTE: This includes vectors where any divisor element is zero/undef. |
| if (DAG.isUndef(Opc, {N0, N1})) |
| return DAG.getUNDEF(VT); |
| |
| // undef / X -> 0 |
| // undef % X -> 0 |
| if (N0.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| // 0 / X -> 0 |
| // 0 % X -> 0 |
| ConstantSDNode *N0C = isConstOrConstSplat(N0); |
| if (N0C && N0C->isZero()) |
| return N0; |
| |
| // X / X -> 1 |
| // X % X -> 0 |
| if (N0 == N1) |
| return DAG.getConstant(IsDiv ? 1 : 0, DL, VT); |
| |
| // X / 1 -> X |
| // X % 1 -> 0 |
| // If this is a boolean op (single-bit element type), we can't have |
| // division-by-zero or remainder-by-zero, so assume the divisor is 1. |
| // TODO: Similarly, if we're zero-extending a boolean divisor, then assume |
| // it's a 1. |
| if ((N1C && N1C->isOne()) || (VT.getScalarType() == MVT::i1)) |
| return IsDiv ? N0 : DAG.getConstant(0, DL, VT); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSDIV(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| EVT CCVT = getSetCCResultType(VT); |
| SDLoc DL(N); |
| |
| // fold (sdiv c1, c2) -> c1/c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, {N0, N1})) |
| return C; |
| |
| // fold vector ops |
| if (VT.isVector()) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (sdiv X, -1) -> 0-X |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| if (N1C && N1C->isAllOnes()) |
| return DAG.getNegative(N0, DL, VT); |
| |
| // fold (sdiv X, MIN_SIGNED) -> select(X == MIN_SIGNED, 1, 0) |
| if (N1C && N1C->isMinSignedValue()) |
| return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ), |
| DAG.getConstant(1, DL, VT), |
| DAG.getConstant(0, DL, VT)); |
| |
| if (SDValue V = simplifyDivRem(N, DAG)) |
| return V; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // If we know the sign bits of both operands are zero, strength reduce to a |
  // udiv instead. Handles (X&15) /s 4 -> (X&15) >> 2
| if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) |
| return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1); |
| |
| if (SDValue V = visitSDIVLike(N0, N1, N)) { |
| // If the corresponding remainder node exists, update its users with |
    // (Dividend - (Quotient * Divisor)).
| if (SDNode *RemNode = DAG.getNodeIfExists(ISD::SREM, N->getVTList(), |
| { N0, N1 })) { |
| // If the sdiv has the exact flag we shouldn't propagate it to the |
| // remainder node. |
| if (!N->getFlags().hasExact()) { |
| SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1); |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul); |
| AddToWorklist(Mul.getNode()); |
| AddToWorklist(Sub.getNode()); |
| CombineTo(RemNode, Sub); |
| } |
| } |
| return V; |
| } |
| |
| // sdiv, srem -> sdivrem |
| // If the divisor is constant, then return DIVREM only if isIntDivCheap() is |
| // true. Otherwise, we break the simplification logic in visitREM(). |
| AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); |
| if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr)) |
| if (SDValue DivRem = useDivRem(N)) |
| return DivRem; |
| |
| return SDValue(); |
| } |
| |
| static bool isDivisorPowerOfTwo(SDValue Divisor) { |
  // Helper for determining whether a value is a power-of-2 constant scalar or a
| // vector of such elements. |
| auto IsPowerOfTwo = [](ConstantSDNode *C) { |
| if (C->isZero() || C->isOpaque()) |
| return false; |
| if (C->getAPIntValue().isPowerOf2()) |
| return true; |
| if (C->getAPIntValue().isNegatedPowerOf2()) |
| return true; |
| return false; |
| }; |
| |
| return ISD::matchUnaryPredicate(Divisor, IsPowerOfTwo); |
| } |
| |
| SDValue DAGCombiner::visitSDIVLike(SDValue N0, SDValue N1, SDNode *N) { |
| SDLoc DL(N); |
| EVT VT = N->getValueType(0); |
| EVT CCVT = getSetCCResultType(VT); |
| unsigned BitWidth = VT.getScalarSizeInBits(); |
| |
| // fold (sdiv X, pow2) -> simple ops after legalize |
| // FIXME: We check for the exact bit here because the generic lowering gives |
| // better results in that case. The target-specific lowering should learn how |
| // to handle exact sdivs efficiently. |
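  // For example, (sdiv i32 X, 8) becomes:
  //   Sign = sra X, 31    ; 0 for non-negative X, -1 for negative X
  //   Srl  = srl Sign, 29 ; the rounding bias: 0 or 7 (2^3 - 1)
  //   Add  = add X, Srl   ; bias negative dividends toward zero
  //   Sra  = sra Add, 3   ; the quotient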
| if (!N->getFlags().hasExact() && isDivisorPowerOfTwo(N1)) { |
| // Target-specific implementation of sdiv x, pow2. |
| if (SDValue Res = BuildSDIVPow2(N)) |
| return Res; |
| |
| // Create constants that are functions of the shift amount value. |
| EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType()); |
| SDValue Bits = DAG.getConstant(BitWidth, DL, ShiftAmtTy); |
| SDValue C1 = DAG.getNode(ISD::CTTZ, DL, VT, N1); |
| C1 = DAG.getZExtOrTrunc(C1, DL, ShiftAmtTy); |
| SDValue Inexact = DAG.getNode(ISD::SUB, DL, ShiftAmtTy, Bits, C1); |
| if (!isConstantOrConstantVector(Inexact)) |
| return SDValue(); |
| |
| // Splat the sign bit into the register |
| SDValue Sign = DAG.getNode(ISD::SRA, DL, VT, N0, |
| DAG.getConstant(BitWidth - 1, DL, ShiftAmtTy)); |
| AddToWorklist(Sign.getNode()); |
| |
    // Add (N0 < 0) ? (2^C1 - 1) : 0 so the following sra rounds toward zero.
| SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, Sign, Inexact); |
| AddToWorklist(Srl.getNode()); |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Srl); |
| AddToWorklist(Add.getNode()); |
| SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Add, C1); |
| AddToWorklist(Sra.getNode()); |
| |
| // Special case: (sdiv X, 1) -> X |
    // Special case: (sdiv X, -1) -> 0-X
| SDValue One = DAG.getConstant(1, DL, VT); |
| SDValue AllOnes = DAG.getAllOnesConstant(DL, VT); |
| SDValue IsOne = DAG.getSetCC(DL, CCVT, N1, One, ISD::SETEQ); |
| SDValue IsAllOnes = DAG.getSetCC(DL, CCVT, N1, AllOnes, ISD::SETEQ); |
| SDValue IsOneOrAllOnes = DAG.getNode(ISD::OR, DL, CCVT, IsOne, IsAllOnes); |
| Sra = DAG.getSelect(DL, VT, IsOneOrAllOnes, N0, Sra); |
| |
| // If dividing by a positive value, we're done. Otherwise, the result must |
| // be negated. |
| SDValue Zero = DAG.getConstant(0, DL, VT); |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, Zero, Sra); |
| |
| // FIXME: Use SELECT_CC once we improve SELECT_CC constant-folding. |
| SDValue IsNeg = DAG.getSetCC(DL, CCVT, N1, Zero, ISD::SETLT); |
| SDValue Res = DAG.getSelect(DL, VT, IsNeg, Sub, Sra); |
| return Res; |
| } |
| |
| // If integer divide is expensive and we satisfy the requirements, emit an |
| // alternate sequence. Targets may check function attributes for size/speed |
| // trade-offs. |
| AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); |
| if (isConstantOrConstantVector(N1) && |
| !TLI.isIntDivCheap(N->getValueType(0), Attr)) |
| if (SDValue Op = BuildSDIV(N)) |
| return Op; |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitUDIV(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| EVT CCVT = getSetCCResultType(VT); |
| SDLoc DL(N); |
| |
| // fold (udiv c1, c2) -> c1/c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT, {N0, N1})) |
| return C; |
| |
| // fold vector ops |
| if (VT.isVector()) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (udiv X, -1) -> select(X == -1, 1, 0) |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| if (N1C && N1C->isAllOnes() && CCVT.isVector() == VT.isVector()) { |
| return DAG.getSelect(DL, VT, DAG.getSetCC(DL, CCVT, N0, N1, ISD::SETEQ), |
| DAG.getConstant(1, DL, VT), |
| DAG.getConstant(0, DL, VT)); |
| } |
| |
| if (SDValue V = simplifyDivRem(N, DAG)) |
| return V; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| if (SDValue V = visitUDIVLike(N0, N1, N)) { |
| // If the corresponding remainder node exists, update its users with |
    // (Dividend - (Quotient * Divisor)).
| if (SDNode *RemNode = DAG.getNodeIfExists(ISD::UREM, N->getVTList(), |
| { N0, N1 })) { |
| // If the udiv has the exact flag we shouldn't propagate it to the |
| // remainder node. |
| if (!N->getFlags().hasExact()) { |
| SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, V, N1); |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul); |
| AddToWorklist(Mul.getNode()); |
| AddToWorklist(Sub.getNode()); |
| CombineTo(RemNode, Sub); |
| } |
| } |
| return V; |
| } |
| |
  // udiv, urem -> udivrem
| // If the divisor is constant, then return DIVREM only if isIntDivCheap() is |
| // true. Otherwise, we break the simplification logic in visitREM(). |
| AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); |
| if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr)) |
| if (SDValue DivRem = useDivRem(N)) |
| return DivRem; |
| |
| // Simplify the operands using demanded-bits information. |
| // We don't have demanded bits support for UDIV so this just enables constant |
| // folding based on known bits. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitUDIVLike(SDValue N0, SDValue N1, SDNode *N) { |
| SDLoc DL(N); |
| EVT VT = N->getValueType(0); |
| |
| // fold (udiv x, (1 << c)) -> x >>u c |
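  // e.g. (udiv x, 16) --> (srl x, 4)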
| if (isConstantOrConstantVector(N1, /*NoOpaques*/ true)) { |
| if (SDValue LogBase2 = BuildLogBase2(N1, DL)) { |
| AddToWorklist(LogBase2.getNode()); |
| |
| EVT ShiftVT = getShiftAmountTy(N0.getValueType()); |
| SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT); |
| AddToWorklist(Trunc.getNode()); |
| return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc); |
| } |
| } |
| |
| // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2 |
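  // e.g. (udiv x, (shl 4, y)) --> (srl x, (add y, 2))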
| if (N1.getOpcode() == ISD::SHL) { |
| SDValue N10 = N1.getOperand(0); |
| if (isConstantOrConstantVector(N10, /*NoOpaques*/ true)) { |
| if (SDValue LogBase2 = BuildLogBase2(N10, DL)) { |
| AddToWorklist(LogBase2.getNode()); |
| |
| EVT ADDVT = N1.getOperand(1).getValueType(); |
| SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT); |
| AddToWorklist(Trunc.getNode()); |
| SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc); |
| AddToWorklist(Add.getNode()); |
| return DAG.getNode(ISD::SRL, DL, VT, N0, Add); |
| } |
| } |
| } |
| |
| // fold (udiv x, c) -> alternate |
| AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); |
| if (isConstantOrConstantVector(N1) && |
| !TLI.isIntDivCheap(N->getValueType(0), Attr)) |
| if (SDValue Op = BuildUDIV(N)) |
| return Op; |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::buildOptimizedSREM(SDValue N0, SDValue N1, SDNode *N) { |
| if (!N->getFlags().hasExact() && isDivisorPowerOfTwo(N1) && |
| !DAG.doesNodeExist(ISD::SDIV, N->getVTList(), {N0, N1})) { |
| // Target-specific implementation of srem x, pow2. |
| if (SDValue Res = BuildSREMPow2(N)) |
| return Res; |
| } |
| return SDValue(); |
| } |
| |
| // handles ISD::SREM and ISD::UREM |
| SDValue DAGCombiner::visitREM(SDNode *N) { |
| unsigned Opcode = N->getOpcode(); |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| EVT CCVT = getSetCCResultType(VT); |
| |
| bool isSigned = (Opcode == ISD::SREM); |
| SDLoc DL(N); |
| |
| // fold (rem c1, c2) -> c1%c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1})) |
| return C; |
| |
| // fold (urem X, -1) -> select(FX == -1, 0, FX) |
| // Freeze the numerator to avoid a miscompile with an undefined value. |
| if (!isSigned && llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false) && |
| CCVT.isVector() == VT.isVector()) { |
| SDValue F0 = DAG.getFreeze(N0); |
| SDValue EqualsNeg1 = DAG.getSetCC(DL, CCVT, F0, N1, ISD::SETEQ); |
| return DAG.getSelect(DL, VT, EqualsNeg1, DAG.getConstant(0, DL, VT), F0); |
| } |
| |
| if (SDValue V = simplifyDivRem(N, DAG)) |
| return V; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| if (isSigned) { |
| // If we know the sign bits of both operands are zero, strength reduce to a |
| // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15 |
| if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) |
| return DAG.getNode(ISD::UREM, DL, VT, N0, N1); |
| } else { |
| if (DAG.isKnownToBeAPowerOfTwo(N1)) { |
| // fold (urem x, pow2) -> (and x, pow2-1) |
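      // e.g. (urem x, 16) --> (and x, 15)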
| SDValue NegOne = DAG.getAllOnesConstant(DL, VT); |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne); |
| AddToWorklist(Add.getNode()); |
| return DAG.getNode(ISD::AND, DL, VT, N0, Add); |
| } |
| // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)) |
| // fold (urem x, (lshr pow2, y)) -> (and x, (add (lshr pow2, y), -1)) |
| // TODO: We should sink the following into isKnownToBePowerOfTwo |
| // using a OrZero parameter analogous to our handling in ValueTracking. |
| if ((N1.getOpcode() == ISD::SHL || N1.getOpcode() == ISD::SRL) && |
| DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) { |
| SDValue NegOne = DAG.getAllOnesConstant(DL, VT); |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne); |
| AddToWorklist(Add.getNode()); |
| return DAG.getNode(ISD::AND, DL, VT, N0, Add); |
| } |
| } |
| |
| AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); |
| |
| // If X/C can be simplified by the division-by-constant logic, lower |
| // X%C to the equivalent of X-X/C*C. |
| // Reuse the SDIVLike/UDIVLike combines - to avoid mangling nodes, the |
| // speculative DIV must not cause a DIVREM conversion. We guard against this |
| // by skipping the simplification if isIntDivCheap(). When div is not cheap, |
| // combine will not return a DIVREM. Regardless, checking cheapness here |
| // makes sense since the simplification results in fatter code. |
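  // e.g. (urem X, 7) becomes X - (X / 7) * 7, where the inner X / 7 is itself
  // typically expanded by BuildUDIV into a multiply-by-magic-constant sequence.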
| if (DAG.isKnownNeverZero(N1) && !TLI.isIntDivCheap(VT, Attr)) { |
| if (isSigned) { |
| // check if we can build faster implementation for srem |
| if (SDValue OptimizedRem = buildOptimizedSREM(N0, N1, N)) |
| return OptimizedRem; |
| } |
| |
| SDValue OptimizedDiv = |
| isSigned ? visitSDIVLike(N0, N1, N) : visitUDIVLike(N0, N1, N); |
| if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != N) { |
| // If the equivalent Div node also exists, update its users. |
| unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV; |
| if (SDNode *DivNode = DAG.getNodeIfExists(DivOpcode, N->getVTList(), |
| { N0, N1 })) |
| CombineTo(DivNode, OptimizedDiv); |
| SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1); |
| SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul); |
| AddToWorklist(OptimizedDiv.getNode()); |
| AddToWorklist(Mul.getNode()); |
| return Sub; |
| } |
| } |
| |
  // sdiv, srem -> sdivrem / udiv, urem -> udivrem
| if (SDValue DivRem = useDivRem(N)) |
| return DivRem.getValue(1); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitMULHS(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (mulhs c1, c2) |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::MULHS, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS. |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::MULHS, DL, N->getVTList(), N1, N0); |
| |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (mulhs x, 0) -> 0 |
    // do not return N1, because it may contain undef elements.
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return DAG.getConstant(0, DL, VT); |
| } |
| |
| // fold (mulhs x, 0) -> 0 |
| if (isNullConstant(N1)) |
| return N1; |
| |
| // fold (mulhs x, 1) -> (sra x, size(x)-1) |
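  // (The high half of the product x * 1 is just the sign bits of x.)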
| if (isOneConstant(N1)) |
| return DAG.getNode( |
| ISD::SRA, DL, VT, N0, |
| DAG.getShiftAmountConstant(N0.getScalarValueSizeInBits() - 1, VT, DL)); |
| |
| // fold (mulhs x, undef) -> 0 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| // If the type twice as wide is legal, transform the mulhs to a wider multiply |
| // plus a shift. |
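  // e.g. for i16 (assuming i32 MUL is legal):
  //   (mulhs i16 a, b) --> (trunc (srl (mul (sext a), (sext b)), 16))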
| if (!TLI.isOperationLegalOrCustom(ISD::MULHS, VT) && VT.isSimple() && |
| !VT.isVector()) { |
| MVT Simple = VT.getSimpleVT(); |
| unsigned SimpleSize = Simple.getSizeInBits(); |
| EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); |
| if (TLI.isOperationLegal(ISD::MUL, NewVT)) { |
| N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0); |
| N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1); |
| N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); |
| N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, |
| DAG.getShiftAmountConstant(SimpleSize, NewVT, DL)); |
| return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitMULHU(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (mulhu c1, c2) |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::MULHU, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS. |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::MULHU, DL, N->getVTList(), N1, N0); |
| |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (mulhu x, 0) -> 0 |
    // do not return N1, because it may contain undef elements.
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return DAG.getConstant(0, DL, VT); |
| } |
| |
| // fold (mulhu x, 0) -> 0 |
| if (isNullConstant(N1)) |
| return N1; |
| |
| // fold (mulhu x, 1) -> 0 |
| if (isOneConstant(N1)) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (mulhu x, undef) -> 0 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (mulhu x, (1 << c)) -> x >> (bitwidth - c) |
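  // e.g. (mulhu i32 x, 2) --> (srl x, 31)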
| if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) && |
| hasOperation(ISD::SRL, VT)) { |
| if (SDValue LogBase2 = BuildLogBase2(N1, DL)) { |
| unsigned NumEltBits = VT.getScalarSizeInBits(); |
| SDValue SRLAmt = DAG.getNode( |
| ISD::SUB, DL, VT, DAG.getConstant(NumEltBits, DL, VT), LogBase2); |
| EVT ShiftVT = getShiftAmountTy(N0.getValueType()); |
| SDValue Trunc = DAG.getZExtOrTrunc(SRLAmt, DL, ShiftVT); |
| return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc); |
| } |
| } |
| |
| // If the type twice as wide is legal, transform the mulhu to a wider multiply |
| // plus a shift. |
| if (!TLI.isOperationLegalOrCustom(ISD::MULHU, VT) && VT.isSimple() && |
| !VT.isVector()) { |
| MVT Simple = VT.getSimpleVT(); |
| unsigned SimpleSize = Simple.getSizeInBits(); |
| EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); |
| if (TLI.isOperationLegal(ISD::MUL, NewVT)) { |
| N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0); |
| N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1); |
| N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); |
| N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, |
| DAG.getShiftAmountConstant(SimpleSize, NewVT, DL)); |
| return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); |
| } |
| } |
| |
| // Simplify the operands using demanded-bits information. |
| // We don't have demanded bits support for MULHU so this just enables constant |
| // folding based on known bits. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitAVG(SDNode *N) { |
| unsigned Opcode = N->getOpcode(); |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| bool IsSigned = Opcode == ISD::AVGCEILS || Opcode == ISD::AVGFLOORS; |
| |
| // fold (avg c1, c2) |
| if (SDValue C = DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS. |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(Opcode, DL, N->getVTList(), N1, N0); |
| |
| if (VT.isVector()) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (avg x, undef) -> x |
| if (N0.isUndef()) |
| return N1; |
| if (N1.isUndef()) |
| return N0; |
| |
| // fold (avg x, x) --> x |
| if (N0 == N1 && Level >= AfterLegalizeTypes) |
| return N0; |
| |
| // fold (avgfloor x, 0) -> x >> 1 |
| SDValue X, Y; |
| if (sd_match(N, m_c_BinOp(ISD::AVGFLOORS, m_Value(X), m_Zero()))) |
| return DAG.getNode(ISD::SRA, DL, VT, X, |
| DAG.getShiftAmountConstant(1, VT, DL)); |
| if (sd_match(N, m_c_BinOp(ISD::AVGFLOORU, m_Value(X), m_Zero()))) |
| return DAG.getNode(ISD::SRL, DL, VT, X, |
| DAG.getShiftAmountConstant(1, VT, DL)); |
| |
| // fold avgu(zext(x), zext(y)) -> zext(avgu(x, y)) |
| // fold avgs(sext(x), sext(y)) -> sext(avgs(x, y)) |
| if (!IsSigned && |
| sd_match(N, m_BinOp(Opcode, m_ZExt(m_Value(X)), m_ZExt(m_Value(Y)))) && |
| X.getValueType() == Y.getValueType() && |
| hasOperation(Opcode, X.getValueType())) { |
| SDValue AvgU = DAG.getNode(Opcode, DL, X.getValueType(), X, Y); |
| return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, AvgU); |
| } |
| if (IsSigned && |
| sd_match(N, m_BinOp(Opcode, m_SExt(m_Value(X)), m_SExt(m_Value(Y)))) && |
| X.getValueType() == Y.getValueType() && |
| hasOperation(Opcode, X.getValueType())) { |
| SDValue AvgS = DAG.getNode(Opcode, DL, X.getValueType(), X, Y); |
| return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, AvgS); |
| } |
| |
| // Fold avgflooru(x,y) -> avgceilu(x,y-1) iff y != 0 |
| // Fold avgflooru(x,y) -> avgceilu(x-1,y) iff x != 0 |
| // Check if avgflooru isn't legal/custom but avgceilu is. |
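  // This uses the identity floor((x + y) / 2) == ceil((x + (y - 1)) / 2),
  // valid whenever y >= 1 (and symmetrically for x).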
| if (Opcode == ISD::AVGFLOORU && !hasOperation(ISD::AVGFLOORU, VT) && |
| (!LegalOperations || hasOperation(ISD::AVGCEILU, VT))) { |
| if (DAG.isKnownNeverZero(N1)) |
| return DAG.getNode( |
| ISD::AVGCEILU, DL, VT, N0, |
| DAG.getNode(ISD::ADD, DL, VT, N1, DAG.getAllOnesConstant(DL, VT))); |
| if (DAG.isKnownNeverZero(N0)) |
| return DAG.getNode( |
| ISD::AVGCEILU, DL, VT, N1, |
| DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getAllOnesConstant(DL, VT))); |
| } |
| |
| // Fold avgfloor((add nw x,y), 1) -> avgceil(x,y) |
| // Fold avgfloor((add nw x,1), y) -> avgceil(x,y) |
| if ((Opcode == ISD::AVGFLOORU && hasOperation(ISD::AVGCEILU, VT)) || |
| (Opcode == ISD::AVGFLOORS && hasOperation(ISD::AVGCEILS, VT))) { |
| SDValue Add; |
| if (sd_match(N, |
| m_c_BinOp(Opcode, |
| m_AllOf(m_Value(Add), m_Add(m_Value(X), m_Value(Y))), |
| m_One())) || |
| sd_match(N, m_c_BinOp(Opcode, |
| m_AllOf(m_Value(Add), m_Add(m_Value(X), m_One())), |
| m_Value(Y)))) { |
| |
| if (IsSigned && Add->getFlags().hasNoSignedWrap()) |
| return DAG.getNode(ISD::AVGCEILS, DL, VT, X, Y); |
| |
| if (!IsSigned && Add->getFlags().hasNoUnsignedWrap()) |
| return DAG.getNode(ISD::AVGCEILU, DL, VT, X, Y); |
| } |
| } |
| |
| // Fold avgfloors(x,y) -> avgflooru(x,y) if both x and y are non-negative |
| if (Opcode == ISD::AVGFLOORS && hasOperation(ISD::AVGFLOORU, VT)) { |
| if (DAG.SignBitIsZero(N0) && DAG.SignBitIsZero(N1)) |
| return DAG.getNode(ISD::AVGFLOORU, DL, VT, N0, N1); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitABD(SDNode *N) { |
| unsigned Opcode = N->getOpcode(); |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (abd c1, c2) |
| if (SDValue C = DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS. |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(Opcode, DL, N->getVTList(), N1, N0); |
| |
| if (VT.isVector()) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (abd x, undef) -> 0 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (abd x, x) -> 0 |
| if (N0 == N1) |
| return DAG.getConstant(0, DL, VT); |
| |
| SDValue X; |
| |
| // fold (abds x, 0) -> abs x |
| if (sd_match(N, m_c_BinOp(ISD::ABDS, m_Value(X), m_Zero())) && |
| (!LegalOperations || hasOperation(ISD::ABS, VT))) |
| return DAG.getNode(ISD::ABS, DL, VT, X); |
| |
| // fold (abdu x, 0) -> x |
| if (sd_match(N, m_c_BinOp(ISD::ABDU, m_Value(X), m_Zero()))) |
| return X; |
| |
  // fold (abds x, y) -> (abdu x, y) iff both args are known non-negative
| if (Opcode == ISD::ABDS && hasOperation(ISD::ABDU, VT) && |
| DAG.SignBitIsZero(N0) && DAG.SignBitIsZero(N1)) |
| return DAG.getNode(ISD::ABDU, DL, VT, N1, N0); |
| |
| return SDValue(); |
| } |
| |
/// Perform optimizations common to nodes that compute two values. LoOp and
/// HiOp give the opcodes for the two computations that are being performed.
/// Return the simplified value, or an empty SDValue if no simplification was
/// made.
| SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, |
| unsigned HiOp) { |
| // If the high half is not needed, just compute the low half. |
| bool HiExists = N->hasAnyUseOfValue(1); |
| if (!HiExists && (!LegalOperations || |
| TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) { |
| SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops()); |
| return CombineTo(N, Res, Res); |
| } |
| |
| // If the low half is not needed, just compute the high half. |
| bool LoExists = N->hasAnyUseOfValue(0); |
| if (!LoExists && (!LegalOperations || |
| TLI.isOperationLegalOrCustom(HiOp, N->getValueType(1)))) { |
| SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops()); |
| return CombineTo(N, Res, Res); |
| } |
| |
| // If both halves are used, return as it is. |
| if (LoExists && HiExists) |
| return SDValue(); |
| |
| // If the two computed results can be simplified separately, separate them. |
| if (LoExists) { |
| SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops()); |
| AddToWorklist(Lo.getNode()); |
| SDValue LoOpt = combine(Lo.getNode()); |
| if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() && |
| (!LegalOperations || |
| TLI.isOperationLegalOrCustom(LoOpt.getOpcode(), LoOpt.getValueType()))) |
| return CombineTo(N, LoOpt, LoOpt); |
| } |
| |
| if (HiExists) { |
| SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops()); |
| AddToWorklist(Hi.getNode()); |
| SDValue HiOpt = combine(Hi.getNode()); |
| if (HiOpt.getNode() && HiOpt != Hi && |
| (!LegalOperations || |
| TLI.isOperationLegalOrCustom(HiOpt.getOpcode(), HiOpt.getValueType()))) |
| return CombineTo(N, HiOpt, HiOpt); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) { |
| if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS)) |
| return Res; |
| |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // Constant fold. |
| if (isa<ConstantSDNode>(N0) && isa<ConstantSDNode>(N1)) |
| return DAG.getNode(ISD::SMUL_LOHI, DL, N->getVTList(), N0, N1); |
| |
| // canonicalize constant to RHS (vector doesn't have to splat) |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::SMUL_LOHI, DL, N->getVTList(), N1, N0); |
| |
  // If the type twice as wide is legal, transform this into a wider multiply
  // plus a shift.
| if (VT.isSimple() && !VT.isVector()) { |
| MVT Simple = VT.getSimpleVT(); |
| unsigned SimpleSize = Simple.getSizeInBits(); |
| EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); |
| if (TLI.isOperationLegal(ISD::MUL, NewVT)) { |
| SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0); |
| SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1); |
| Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi); |
      // Compute the high part from the upper half of the wide product.
| Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo, |
| DAG.getShiftAmountConstant(SimpleSize, NewVT, DL)); |
| Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi); |
      // Compute the low part by truncating the wide product.
| Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo); |
| return CombineTo(N, Lo, Hi); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) { |
| if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU)) |
| return Res; |
| |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // Constant fold. |
| if (isa<ConstantSDNode>(N0) && isa<ConstantSDNode>(N1)) |
| return DAG.getNode(ISD::UMUL_LOHI, DL, N->getVTList(), N0, N1); |
| |
| // canonicalize constant to RHS (vector doesn't have to splat) |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::UMUL_LOHI, DL, N->getVTList(), N1, N0); |
| |
| // (umul_lohi N0, 0) -> (0, 0) |
| if (isNullConstant(N1)) { |
| SDValue Zero = DAG.getConstant(0, DL, VT); |
| return CombineTo(N, Zero, Zero); |
| } |
| |
| // (umul_lohi N0, 1) -> (N0, 0) |
| if (isOneConstant(N1)) { |
| SDValue Zero = DAG.getConstant(0, DL, VT); |
| return CombineTo(N, N0, Zero); |
| } |
| |
  // If the type twice as wide is legal, transform this into a wider multiply
  // plus a shift.
| if (VT.isSimple() && !VT.isVector()) { |
| MVT Simple = VT.getSimpleVT(); |
| unsigned SimpleSize = Simple.getSizeInBits(); |
| EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); |
| if (TLI.isOperationLegal(ISD::MUL, NewVT)) { |
| SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0); |
| SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1); |
| Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi); |
      // Compute the high part from the upper half of the wide product.
| Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo, |
| DAG.getShiftAmountConstant(SimpleSize, NewVT, DL)); |
| Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi); |
      // Compute the low part by truncating the wide product.
| Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo); |
| return CombineTo(N, Lo, Hi); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitMULO(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| bool IsSigned = (ISD::SMULO == N->getOpcode()); |
| |
| EVT CarryVT = N->getValueType(1); |
| SDLoc DL(N); |
| |
| ConstantSDNode *N0C = isConstOrConstSplat(N0); |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| |
| // fold operation with constant operands. |
| // TODO: Move this to FoldConstantArithmetic when it supports nodes with |
| // multiple results. |
| if (N0C && N1C) { |
| bool Overflow; |
| APInt Result = |
| IsSigned ? N0C->getAPIntValue().smul_ov(N1C->getAPIntValue(), Overflow) |
| : N0C->getAPIntValue().umul_ov(N1C->getAPIntValue(), Overflow); |
| return CombineTo(N, DAG.getConstant(Result, DL, VT), |
| DAG.getBoolConstant(Overflow, DL, CarryVT, CarryVT)); |
| } |
| |
| // canonicalize constant to RHS. |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(N->getOpcode(), DL, N->getVTList(), N1, N0); |
| |
| // fold (mulo x, 0) -> 0 + no carry out |
| if (isNullOrNullSplat(N1)) |
| return CombineTo(N, DAG.getConstant(0, DL, VT), |
| DAG.getConstant(0, DL, CarryVT)); |
| |
| // (mulo x, 2) -> (addo x, x) |
| // FIXME: This needs a freeze. |
| if (N1C && N1C->getAPIntValue() == 2 && |
| (!IsSigned || VT.getScalarSizeInBits() > 2)) |
| return DAG.getNode(IsSigned ? ISD::SADDO : ISD::UADDO, DL, |
| N->getVTList(), N0, N0); |
| |
  // A 1-bit SMULO overflows if both inputs are 1.
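  // In 1-bit signed arithmetic the only values are 0 and -1, and -1 * -1 = 1
  // is not representable, so the multiply overflows iff both bits are set.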
| if (IsSigned && VT.getScalarSizeInBits() == 1) { |
| SDValue And = DAG.getNode(ISD::AND, DL, VT, N0, N1); |
| SDValue Cmp = DAG.getSetCC(DL, CarryVT, And, |
| DAG.getConstant(0, DL, VT), ISD::SETNE); |
| return CombineTo(N, And, Cmp); |
| } |
| |
| // If it cannot overflow, transform into a mul. |
| if (DAG.willNotOverflowMul(IsSigned, N0, N1)) |
| return CombineTo(N, DAG.getNode(ISD::MUL, DL, VT, N0, N1), |
| DAG.getConstant(0, DL, CarryVT)); |
| return SDValue(); |
| } |
| |
// Function to calculate whether the Min/Max pair of SDNodes (potentially
// swapped around) make a signed saturate pattern, clamping to between a signed
// saturate of -2^(BW-1) and 2^(BW-1)-1, or an unsigned saturate of 0 and
// 2^BW-1. Returns the node being clamped and the bitwidth of the clamp in BW.
// Should work with both SMIN/SMAX nodes and a setcc/select combo. The operands
// are the same as SimplifySelectCC: N0 < N1 ? N2 : N3.
| static SDValue isSaturatingMinMax(SDValue N0, SDValue N1, SDValue N2, |
| SDValue N3, ISD::CondCode CC, unsigned &BW, |
| bool &Unsigned, SelectionDAG &DAG) { |
| auto isSignedMinMax = [&](SDValue N0, SDValue N1, SDValue N2, SDValue N3, |
| ISD::CondCode CC) { |
| // The compare and select operand should be the same or the select operands |
| // should be truncated versions of the comparison. |
| if (N0 != N2 && (N2.getOpcode() != ISD::TRUNCATE || N0 != N2.getOperand(0))) |
| return 0; |
| // The constants need to be the same or a truncated version of each other. |
| ConstantSDNode *N1C = isConstOrConstSplat(peekThroughTruncates(N1)); |
| ConstantSDNode *N3C = isConstOrConstSplat(peekThroughTruncates(N3)); |
| if (!N1C || !N3C) |
| return 0; |
| const APInt &C1 = N1C->getAPIntValue().trunc(N1.getScalarValueSizeInBits()); |
| const APInt &C2 = N3C->getAPIntValue().trunc(N3.getScalarValueSizeInBits()); |
| if (C1.getBitWidth() < C2.getBitWidth() || C1 != C2.sext(C1.getBitWidth())) |
| return 0; |
| return CC == ISD::SETLT ? ISD::SMIN : (CC == ISD::SETGT ? ISD::SMAX : 0); |
| }; |
| |
| // Check the initial value is a SMIN/SMAX equivalent. |
| unsigned Opcode0 = isSignedMinMax(N0, N1, N2, N3, CC); |
| if (!Opcode0) |
| return SDValue(); |
| |
  // We may need only one range check if the fptosi can never produce the
  // upper value.
| if (N0.getOpcode() == ISD::FP_TO_SINT && Opcode0 == ISD::SMAX) { |
| if (isNullOrNullSplat(N3)) { |
| EVT IntVT = N0.getValueType().getScalarType(); |
| EVT FPVT = N0.getOperand(0).getValueType().getScalarType(); |
| if (FPVT.isSimple()) { |
| Type *InputTy = FPVT.getTypeForEVT(*DAG.getContext()); |
| const fltSemantics &Semantics = InputTy->getFltSemantics(); |
| uint32_t MinBitWidth = |
| APFloatBase::semanticsIntSizeInBits(Semantics, /*isSigned*/ true); |
| if (IntVT.getSizeInBits() >= MinBitWidth) { |
| Unsigned = true; |
| BW = PowerOf2Ceil(MinBitWidth); |
| return N0; |
| } |
| } |
| } |
| } |
| |
| SDValue N00, N01, N02, N03; |
| ISD::CondCode N0CC; |
| switch (N0.getOpcode()) { |
| case ISD::SMIN: |
| case ISD::SMAX: |
| N00 = N02 = N0.getOperand(0); |
| N01 = N03 = N0.getOperand(1); |
| N0CC = N0.getOpcode() == ISD::SMIN ? ISD::SETLT : ISD::SETGT; |
| break; |
| case ISD::SELECT_CC: |
| N00 = N0.getOperand(0); |
| N01 = N0.getOperand(1); |
| N02 = N0.getOperand(2); |
| N03 = N0.getOperand(3); |
| N0CC = cast<CondCodeSDNode>(N0.getOperand(4))->get(); |
| break; |
| case ISD::SELECT: |
| case ISD::VSELECT: |
| if (N0.getOperand(0).getOpcode() != ISD::SETCC) |
| return SDValue(); |
| N00 = N0.getOperand(0).getOperand(0); |
| N01 = N0.getOperand(0).getOperand(1); |
| N02 = N0.getOperand(1); |
| N03 = N0.getOperand(2); |
| N0CC = cast<CondCodeSDNode>(N0.getOperand(0).getOperand(2))->get(); |
| break; |
| default: |
| return SDValue(); |
| } |
| |
| unsigned Opcode1 = isSignedMinMax(N00, N01, N02, N03, N0CC); |
| if (!Opcode1 || Opcode0 == Opcode1) |
| return SDValue(); |
| |
| ConstantSDNode *MinCOp = isConstOrConstSplat(Opcode0 == ISD::SMIN ? N1 : N01); |
| ConstantSDNode *MaxCOp = isConstOrConstSplat(Opcode0 == ISD::SMIN ? N01 : N1); |
| if (!MinCOp || !MaxCOp || MinCOp->getValueType(0) != MaxCOp->getValueType(0)) |
| return SDValue(); |
| |
| const APInt &MinC = MinCOp->getAPIntValue(); |
| const APInt &MaxC = MaxCOp->getAPIntValue(); |
| APInt MinCPlus1 = MinC + 1; |
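  // For example, a signed saturate to 8 bits clamps to [-128, 127]: MinC = 127
  // and MaxC = -128, so MinCPlus1 = 128 = 2^7 and BW = 8. An unsigned saturate
  // to 8 bits clamps to [0, 255]: MinC = 255 and MaxC = 0, so MinCPlus1 = 256
  // = 2^8 and BW = 8.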
| if (-MaxC == MinCPlus1 && MinCPlus1.isPowerOf2()) { |
| BW = MinCPlus1.exactLogBase2() + 1; |
| Unsigned = false; |
| return N02; |
| } |
| |
| if (MaxC == 0 && MinCPlus1.isPowerOf2()) { |
| BW = MinCPlus1.exactLogBase2(); |
| Unsigned = true; |
| return N02; |
| } |
| |
| return SDValue(); |
| } |
| |
| static SDValue PerformMinMaxFpToSatCombine(SDValue N0, SDValue N1, SDValue N2, |
| SDValue N3, ISD::CondCode CC, |
| SelectionDAG &DAG) { |
| unsigned BW; |
| bool Unsigned; |
| SDValue Fp = isSaturatingMinMax(N0, N1, N2, N3, CC, BW, Unsigned, DAG); |
| if (!Fp || Fp.getOpcode() != ISD::FP_TO_SINT) |
| return SDValue(); |
| EVT FPVT = Fp.getOperand(0).getValueType(); |
| EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), BW); |
| if (FPVT.isVector()) |
| NewVT = EVT::getVectorVT(*DAG.getContext(), NewVT, |
| FPVT.getVectorElementCount()); |
| unsigned NewOpc = Unsigned ? ISD::FP_TO_UINT_SAT : ISD::FP_TO_SINT_SAT; |
| if (!DAG.getTargetLoweringInfo().shouldConvertFpToSat(NewOpc, FPVT, NewVT)) |
| return SDValue(); |
| SDLoc DL(Fp); |
| SDValue Sat = DAG.getNode(NewOpc, DL, NewVT, Fp.getOperand(0), |
| DAG.getValueType(NewVT.getScalarType())); |
| return DAG.getExtOrTrunc(!Unsigned, Sat, DL, N2->getValueType(0)); |
| } |
| |
| static SDValue PerformUMinFpToSatCombine(SDValue N0, SDValue N1, SDValue N2, |
| SDValue N3, ISD::CondCode CC, |
| SelectionDAG &DAG) { |
  // We are looking for UMIN(FPTOUI(X), (2^n)-1), which may have come via a
  // select/vselect/select_cc. The select operands (N2/N3) may be truncated
  // versions of the setcc operands (N0/N1).
| if ((N0 != N2 && |
| (N2.getOpcode() != ISD::TRUNCATE || N0 != N2.getOperand(0))) || |
| N0.getOpcode() != ISD::FP_TO_UINT || CC != ISD::SETULT) |
| return SDValue(); |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| ConstantSDNode *N3C = isConstOrConstSplat(N3); |
| if (!N1C || !N3C) |
| return SDValue(); |
| const APInt &C1 = N1C->getAPIntValue(); |
| const APInt &C3 = N3C->getAPIntValue(); |
| if (!(C1 + 1).isPowerOf2() || C1.getBitWidth() < C3.getBitWidth() || |
| C1 != C3.zext(C1.getBitWidth())) |
| return SDValue(); |
| |
| unsigned BW = (C1 + 1).exactLogBase2(); |
| EVT FPVT = N0.getOperand(0).getValueType(); |
| EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), BW); |
| if (FPVT.isVector()) |
| NewVT = EVT::getVectorVT(*DAG.getContext(), NewVT, |
| FPVT.getVectorElementCount()); |
| if (!DAG.getTargetLoweringInfo().shouldConvertFpToSat(ISD::FP_TO_UINT_SAT, |
| FPVT, NewVT)) |
| return SDValue(); |
| |
| SDValue Sat = |
| DAG.getNode(ISD::FP_TO_UINT_SAT, SDLoc(N0), NewVT, N0.getOperand(0), |
| DAG.getValueType(NewVT.getScalarType())); |
| return DAG.getZExtOrTrunc(Sat, SDLoc(N0), N3.getValueType()); |
| } |
| |
| SDValue DAGCombiner::visitIMINMAX(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| unsigned Opcode = N->getOpcode(); |
| SDLoc DL(N); |
| |
| // fold operation with constant operands. |
| if (SDValue C = DAG.FoldConstantArithmetic(Opcode, DL, VT, {N0, N1})) |
| return C; |
| |
| // If the operands are the same, this is a no-op. |
| if (N0 == N1) |
| return N0; |
| |
| // canonicalize constant to RHS |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(Opcode, DL, VT, N1, N0); |
| |
| // fold vector ops |
| if (VT.isVector()) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // reassociate minmax |
| if (SDValue RMINMAX = reassociateOps(Opcode, DL, N0, N1, N->getFlags())) |
| return RMINMAX; |
| |
  // If the sign bits are zero, flip between UMIN/UMAX and SMIN/SMAX.
  // Only do this if:
  // 1. The current op isn't legal but the flipped one is.
  // 2. The saturation pattern is broken by canonicalization in InstCombine.
| bool IsOpIllegal = !TLI.isOperationLegal(Opcode, VT); |
| bool IsSatBroken = Opcode == ISD::UMIN && N0.getOpcode() == ISD::SMAX; |
| if ((IsSatBroken || IsOpIllegal) && (N0.isUndef() || DAG.SignBitIsZero(N0)) && |
| (N1.isUndef() || DAG.SignBitIsZero(N1))) { |
| unsigned AltOpcode; |
| switch (Opcode) { |
| case ISD::SMIN: AltOpcode = ISD::UMIN; break; |
| case ISD::SMAX: AltOpcode = ISD::UMAX; break; |
| case ISD::UMIN: AltOpcode = ISD::SMIN; break; |
| case ISD::UMAX: AltOpcode = ISD::SMAX; break; |
| default: llvm_unreachable("Unknown MINMAX opcode"); |
| } |
| if ((IsSatBroken && IsOpIllegal) || TLI.isOperationLegal(AltOpcode, VT)) |
| return DAG.getNode(AltOpcode, DL, VT, N0, N1); |
| } |
| |
| if (Opcode == ISD::SMIN || Opcode == ISD::SMAX) |
| if (SDValue S = PerformMinMaxFpToSatCombine( |
| N0, N1, N0, N1, Opcode == ISD::SMIN ? ISD::SETLT : ISD::SETGT, DAG)) |
| return S; |
| if (Opcode == ISD::UMIN) |
| if (SDValue S = PerformUMinFpToSatCombine(N0, N1, N0, N1, ISD::SETULT, DAG)) |
| return S; |
| |
| // Fold min/max(vecreduce(x), vecreduce(y)) -> vecreduce(min/max(x, y)) |
| auto ReductionOpcode = [](unsigned Opcode) { |
| switch (Opcode) { |
| case ISD::SMIN: |
| return ISD::VECREDUCE_SMIN; |
| case ISD::SMAX: |
| return ISD::VECREDUCE_SMAX; |
| case ISD::UMIN: |
| return ISD::VECREDUCE_UMIN; |
| case ISD::UMAX: |
| return ISD::VECREDUCE_UMAX; |
| default: |
| llvm_unreachable("Unexpected opcode"); |
| } |
| }; |
| if (SDValue SD = reassociateReduction(ReductionOpcode(Opcode), Opcode, |
| SDLoc(N), VT, N0, N1)) |
| return SD; |
| |
| // Simplify the operands using demanded-bits information. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| return SDValue(); |
| } |
| |
| /// If this is a bitwise logic instruction and both operands have the same |
| /// opcode, try to sink the other opcode after the logic instruction. |
| SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) { |
| SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| unsigned LogicOpcode = N->getOpcode(); |
| unsigned HandOpcode = N0.getOpcode(); |
| assert(ISD::isBitwiseLogicOp(LogicOpcode) && "Expected logic opcode"); |
| assert(HandOpcode == N1.getOpcode() && "Bad input!"); |
| |
| // Bail early if none of these transforms apply. |
| if (N0.getNumOperands() == 0) |
| return SDValue(); |
| |
| // FIXME: We should check number of uses of the operands to not increase |
| // the instruction count for all transforms. |
| |
| // Handle size-changing casts (or sign_extend_inreg). |
| SDValue X = N0.getOperand(0); |
| SDValue Y = N1.getOperand(0); |
| EVT XVT = X.getValueType(); |
| SDLoc DL(N); |
| if (ISD::isExtOpcode(HandOpcode) || ISD::isExtVecInRegOpcode(HandOpcode) || |
| (HandOpcode == ISD::SIGN_EXTEND_INREG && |
| N0.getOperand(1) == N1.getOperand(1))) { |
| // If both operands have other uses, this transform would create extra |
| // instructions without eliminating anything. |
| if (!N0.hasOneUse() && !N1.hasOneUse()) |
| return SDValue(); |
| // We need matching integer source types. |
| if (XVT != Y.getValueType()) |
| return SDValue(); |
| // Don't create an illegal op during or after legalization. Don't ever |
| // create an unsupported vector op. |
| if ((VT.isVector() || LegalOperations) && |
| !TLI.isOperationLegalOrCustom(LogicOpcode, XVT)) |
| return SDValue(); |
| // Avoid infinite looping with PromoteIntBinOp. |
| // TODO: Should we apply desirable/legal constraints to all opcodes? |
| if ((HandOpcode == ISD::ANY_EXTEND || |
| HandOpcode == ISD::ANY_EXTEND_VECTOR_INREG) && |
| LegalTypes && !TLI.isTypeDesirableForOp(LogicOpcode, XVT)) |
| return SDValue(); |
| // logic_op (hand_op X), (hand_op Y) --> hand_op (logic_op X, Y) |
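    // e.g. (and (zext X), (zext Y)) --> (zext (and X, Y))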
| SDNodeFlags LogicFlags; |
| LogicFlags.setDisjoint(N->getFlags().hasDisjoint() && |
| ISD::isExtOpcode(HandOpcode)); |
| SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y, LogicFlags); |
| if (HandOpcode == ISD::SIGN_EXTEND_INREG) |
| return DAG.getNode(HandOpcode, DL, VT, Logic, N0.getOperand(1)); |
| return DAG.getNode(HandOpcode, DL, VT, Logic); |
| } |
| |
| // logic_op (truncate x), (truncate y) --> truncate (logic_op x, y) |
| if (HandOpcode == ISD::TRUNCATE) { |
| // If both operands have other uses, this transform would create extra |
| // instructions without eliminating anything. |
| if (!N0.hasOneUse() && !N1.hasOneUse()) |
| return SDValue(); |
| // We need matching source types. |
| if (XVT != Y.getValueType()) |
| return SDValue(); |
| // Don't create an illegal op during or after legalization. |
| if (LegalOperations && !TLI.isOperationLegal(LogicOpcode, XVT)) |
| return SDValue(); |
| // Be extra careful sinking truncate. If it's free, there's no benefit in |
| // widening a binop. Also, don't create a logic op on an illegal type. |
| if (TLI.isZExtFree(VT, XVT) && TLI.isTruncateFree(XVT, VT)) |
| return SDValue(); |
| if (!TLI.isTypeLegal(XVT)) |
| return SDValue(); |
| SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y); |
| return DAG.getNode(HandOpcode, DL, VT, Logic); |
| } |
| |
| // For binops SHL/SRL/SRA/AND: |
| // logic_op (OP x, z), (OP y, z) --> OP (logic_op x, y), z |
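| // For example: or (srl X, C), (srl Y, C) --> srl (or X, Y), C. |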
| if ((HandOpcode == ISD::SHL || HandOpcode == ISD::SRL || |
| HandOpcode == ISD::SRA || HandOpcode == ISD::AND) && |
| N0.getOperand(1) == N1.getOperand(1)) { |
| // If either operand has other uses, this transform is not an improvement. |
| if (!N0.hasOneUse() || !N1.hasOneUse()) |
| return SDValue(); |
| SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y); |
| return DAG.getNode(HandOpcode, DL, VT, Logic, N0.getOperand(1)); |
| } |
| |
| // Unary ops: logic_op (bswap x), (bswap y) --> bswap (logic_op x, y) |
| if (HandOpcode == ISD::BSWAP) { |
| // If either operand has other uses, this transform is not an improvement. |
| if (!N0.hasOneUse() || !N1.hasOneUse()) |
| return SDValue(); |
| SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y); |
| return DAG.getNode(HandOpcode, DL, VT, Logic); |
| } |
| |
| // For funnel shifts FSHL/FSHR: |
| // logic_op (OP x, x1, s), (OP y, y1, s) --> |
| // --> OP (logic_op x, y), (logic_op x1, y1), s |
| if ((HandOpcode == ISD::FSHL || HandOpcode == ISD::FSHR) && |
| N0.getOperand(2) == N1.getOperand(2)) { |
| if (!N0.hasOneUse() || !N1.hasOneUse()) |
| return SDValue(); |
| SDValue X1 = N0.getOperand(1); |
| SDValue Y1 = N1.getOperand(1); |
| SDValue S = N0.getOperand(2); |
| SDValue Logic0 = DAG.getNode(LogicOpcode, DL, VT, X, Y); |
| SDValue Logic1 = DAG.getNode(LogicOpcode, DL, VT, X1, Y1); |
| return DAG.getNode(HandOpcode, DL, VT, Logic0, Logic1, S); |
| } |
| |
| // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B)) |
| // Only perform this optimization up until type legalization, before |
| // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by |
| // adding bitcasts. For example (xor v4i32) is promoted to (xor v2i64), and |
| // we don't want to undo this promotion. |
| // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper |
| // on scalars. |
| if ((HandOpcode == ISD::BITCAST || HandOpcode == ISD::SCALAR_TO_VECTOR) && |
| Level <= AfterLegalizeTypes) { |
| // Input types must be integer and the same. |
| if (XVT.isInteger() && XVT == Y.getValueType() && |
| !(VT.isVector() && TLI.isTypeLegal(VT) && |
| !XVT.isVector() && !TLI.isTypeLegal(XVT))) { |
| SDValue Logic = DAG.getNode(LogicOpcode, DL, XVT, X, Y); |
| return DAG.getNode(HandOpcode, DL, VT, Logic); |
| } |
| } |
| |
| // Xor/and/or are indifferent to the swizzle operation (shuffle of one value). |
| // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B)) |
| // If both shuffles use the same mask, and both shuffle within a single |
| // vector, then it is worthwhile to move the swizzle after the operation. |
| // The type-legalizer generates this pattern when loading illegal |
| // vector types from memory. In many cases this allows additional shuffle |
| // optimizations. |
| // There are other cases where moving the shuffle after the xor/and/or |
| // is profitable even if shuffles don't perform a swizzle. |
| // If both shuffles use the same mask, and both shuffles have the same first |
| // or second operand, then it might still be profitable to move the shuffle |
| // after the xor/and/or operation. |
| if (HandOpcode == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) { |
| auto *SVN0 = cast<ShuffleVectorSDNode>(N0); |
| auto *SVN1 = cast<ShuffleVectorSDNode>(N1); |
| assert(X.getValueType() == Y.getValueType() && |
| "Inputs to shuffles are not the same type"); |
| |
| // Check that both shuffles use the same mask. The masks are known to be of |
| // the same length because the result vector type is the same. |
| // Check also that shuffles have only one use to avoid introducing extra |
| // instructions. |
| if (!SVN0->hasOneUse() || !SVN1->hasOneUse() || |
| !SVN0->getMask().equals(SVN1->getMask())) |
| return SDValue(); |
| |
| // Don't try to fold this node if it requires introducing a |
| // build vector of all zeros that might be illegal at this stage. |
| SDValue ShOp = N0.getOperand(1); |
| if (LogicOpcode == ISD::XOR && !ShOp.isUndef()) |
| ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations); |
| |
| // (logic_op (shuf (A, C), shuf (B, C))) --> shuf (logic_op (A, B), C) |
| if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) { |
| SDValue Logic = DAG.getNode(LogicOpcode, DL, VT, |
| N0.getOperand(0), N1.getOperand(0)); |
| return DAG.getVectorShuffle(VT, DL, Logic, ShOp, SVN0->getMask()); |
| } |
| |
| // Don't try to fold this node if it requires introducing a |
| // build vector of all zeros that might be illegal at this stage. |
| ShOp = N0.getOperand(0); |
| if (LogicOpcode == ISD::XOR && !ShOp.isUndef()) |
| ShOp = tryFoldToZero(DL, TLI, VT, DAG, LegalOperations); |
| |
| // (logic_op (shuf (C, A), shuf (C, B))) --> shuf (C, logic_op (A, B)) |
| if (N0.getOperand(0) == N1.getOperand(0) && ShOp.getNode()) { |
| SDValue Logic = DAG.getNode(LogicOpcode, DL, VT, N0.getOperand(1), |
| N1.getOperand(1)); |
| return DAG.getVectorShuffle(VT, DL, ShOp, Logic, SVN0->getMask()); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| /// Try to make (and/or setcc (LL, LR), setcc (RL, RR)) more efficient. |
| SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1, |
| const SDLoc &DL) { |
| SDValue LL, LR, RL, RR, N0CC, N1CC; |
| if (!isSetCCEquivalent(N0, LL, LR, N0CC) || |
| !isSetCCEquivalent(N1, RL, RR, N1CC)) |
| return SDValue(); |
| |
| assert(N0.getValueType() == N1.getValueType() && |
| "Unexpected operand types for bitwise logic op"); |
| assert(LL.getValueType() == LR.getValueType() && |
| RL.getValueType() == RR.getValueType() && |
| "Unexpected operand types for setcc"); |
| |
| // If we're here post-legalization or the logic op type is not i1, the logic |
| // op type must match a setcc result type. Also, all folds require new |
| // operations on the left and right operands, so those types must match. |
| EVT VT = N0.getValueType(); |
| EVT OpVT = LL.getValueType(); |
| if (LegalOperations || VT.getScalarType() != MVT::i1) |
| if (VT != getSetCCResultType(OpVT)) |
| return SDValue(); |
| if (OpVT != RL.getValueType()) |
| return SDValue(); |
| |
| ISD::CondCode CC0 = cast<CondCodeSDNode>(N0CC)->get(); |
| ISD::CondCode CC1 = cast<CondCodeSDNode>(N1CC)->get(); |
| bool IsInteger = OpVT.isInteger(); |
| if (LR == RR && CC0 == CC1 && IsInteger) { |
| bool IsZero = isNullOrNullSplat(LR); |
| bool IsNeg1 = isAllOnesOrAllOnesSplat(LR); |
| |
| // All bits clear? |
| bool AndEqZero = IsAnd && CC1 == ISD::SETEQ && IsZero; |
| // All sign bits clear? |
| bool AndGtNeg1 = IsAnd && CC1 == ISD::SETGT && IsNeg1; |
| // Any bits set? |
| bool OrNeZero = !IsAnd && CC1 == ISD::SETNE && IsZero; |
| // Any sign bits set? |
| bool OrLtZero = !IsAnd && CC1 == ISD::SETLT && IsZero; |
| |
| // (and (seteq X, 0), (seteq Y, 0)) --> (seteq (or X, Y), 0) |
| // (and (setgt X, -1), (setgt Y, -1)) --> (setgt (or X, Y), -1) |
| // (or (setne X, 0), (setne Y, 0)) --> (setne (or X, Y), 0) |
| // (or (setlt X, 0), (setlt Y, 0)) --> (setlt (or X, Y), 0) |
| if (AndEqZero || AndGtNeg1 || OrNeZero || OrLtZero) { |
| SDValue Or = DAG.getNode(ISD::OR, SDLoc(N0), OpVT, LL, RL); |
| AddToWorklist(Or.getNode()); |
| return DAG.getSetCC(DL, VT, Or, LR, CC1); |
| } |
| |
| // All bits set? |
| bool AndEqNeg1 = IsAnd && CC1 == ISD::SETEQ && IsNeg1; |
| // All sign bits set? |
| bool AndLtZero = IsAnd && CC1 == ISD::SETLT && IsZero; |
| // Any bits clear? |
| bool OrNeNeg1 = !IsAnd && CC1 == ISD::SETNE && IsNeg1; |
| // Any sign bits clear? |
| bool OrGtNeg1 = !IsAnd && CC1 == ISD::SETGT && IsNeg1; |
| |
| // (and (seteq X, -1), (seteq Y, -1)) --> (seteq (and X, Y), -1) |
| // (and (setlt X, 0), (setlt Y, 0)) --> (setlt (and X, Y), 0) |
| // (or (setne X, -1), (setne Y, -1)) --> (setne (and X, Y), -1) |
| // (or (setgt X, -1), (setgt Y -1)) --> (setgt (and X, Y), -1) |
| if (AndEqNeg1 || AndLtZero || OrNeNeg1 || OrGtNeg1) { |
| SDValue And = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, LL, RL); |
| AddToWorklist(And.getNode()); |
| return DAG.getSetCC(DL, VT, And, LR, CC1); |
| } |
| } |
| |
| // TODO: What is the 'or' equivalent of this fold? |
| // (and (setne X, 0), (setne X, -1)) --> (setuge (add X, 1), 2) |
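| // The add wraps 0 to 1 and -1 to 0, so (add X, 1) u>= 2 holds exactly |
| // when X is neither 0 nor -1. |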
| if (IsAnd && LL == RL && CC0 == CC1 && OpVT.getScalarSizeInBits() > 1 && |
| IsInteger && CC0 == ISD::SETNE && |
| ((isNullConstant(LR) && isAllOnesConstant(RR)) || |
| (isAllOnesConstant(LR) && isNullConstant(RR)))) { |
| SDValue One = DAG.getConstant(1, DL, OpVT); |
| SDValue Two = DAG.getConstant(2, DL, OpVT); |
| SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0), OpVT, LL, One); |
| AddToWorklist(Add.getNode()); |
| return DAG.getSetCC(DL, VT, Add, Two, ISD::SETUGE); |
| } |
| |
| // Try more general transforms if the predicates match and the only user of |
| // the compares is the 'and' or 'or'. |
| if (IsInteger && TLI.convertSetCCLogicToBitwiseLogic(OpVT) && CC0 == CC1 && |
| N0.hasOneUse() && N1.hasOneUse()) { |
| // and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0 |
| // or (setne A, B), (setne C, D) --> setne (or (xor A, B), (xor C, D)), 0 |
| if ((IsAnd && CC1 == ISD::SETEQ) || (!IsAnd && CC1 == ISD::SETNE)) { |
| SDValue XorL = DAG.getNode(ISD::XOR, SDLoc(N0), OpVT, LL, LR); |
| SDValue XorR = DAG.getNode(ISD::XOR, SDLoc(N1), OpVT, RL, RR); |
| SDValue Or = DAG.getNode(ISD::OR, DL, OpVT, XorL, XorR); |
| SDValue Zero = DAG.getConstant(0, DL, OpVT); |
| return DAG.getSetCC(DL, VT, Or, Zero, CC1); |
| } |
| |
| // Turn compare of constants whose difference is 1 bit into add+and+setcc. |
| if ((IsAnd && CC1 == ISD::SETNE) || (!IsAnd && CC1 == ISD::SETEQ)) { |
| // Match a shared variable operand and 2 non-opaque constant operands. |
| auto MatchDiffPow2 = [&](ConstantSDNode *C0, ConstantSDNode *C1) { |
| // The difference of the constants must be a single bit. |
| const APInt &CMax = |
| APIntOps::umax(C0->getAPIntValue(), C1->getAPIntValue()); |
| const APInt &CMin = |
| APIntOps::umin(C0->getAPIntValue(), C1->getAPIntValue()); |
| return !C0->isOpaque() && !C1->isOpaque() && (CMax - CMin).isPowerOf2(); |
| }; |
| if (LL == RL && ISD::matchBinaryPredicate(LR, RR, MatchDiffPow2)) { |
| // and/or (setcc X, CMax, ne), (setcc X, CMin, ne/eq) --> |
| // setcc (and (sub X, CMin), ~(CMax - CMin)), 0, ne/eq |
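| // For example, with CMin = 4 and CMax = 6 (difference 2): |
| //   (X != 4) & (X != 6) --> ((X - 4) & ~2) != 0 |
| // X = 4 gives 0, X = 6 gives 2 & ~2 = 0, and any other X gives a |
| // nonzero value. |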
| SDValue Max = DAG.getNode(ISD::UMAX, DL, OpVT, LR, RR); |
| SDValue Min = DAG.getNode(ISD::UMIN, DL, OpVT, LR, RR); |
| SDValue Offset = DAG.getNode(ISD::SUB, DL, OpVT, LL, Min); |
| SDValue Diff = DAG.getNode(ISD::SUB, DL, OpVT, Max, Min); |
| SDValue Mask = DAG.getNOT(DL, Diff, OpVT); |
| SDValue And = DAG.getNode(ISD::AND, DL, OpVT, Offset, Mask); |
| SDValue Zero = DAG.getConstant(0, DL, OpVT); |
| return DAG.getSetCC(DL, VT, And, Zero, CC0); |
| } |
| } |
| } |
| |
| // Canonicalize equivalent operands to LL == RL. |
| if (LL == RR && LR == RL) { |
| CC1 = ISD::getSetCCSwappedOperands(CC1); |
| std::swap(RL, RR); |
| } |
| |
| // (and (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC) |
| // (or (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC) |
| if (LL == RL && LR == RR) { |
| ISD::CondCode NewCC = IsAnd ? ISD::getSetCCAndOperation(CC0, CC1, OpVT) |
| : ISD::getSetCCOrOperation(CC0, CC1, OpVT); |
| if (NewCC != ISD::SETCC_INVALID && |
| (!LegalOperations || |
| (TLI.isCondCodeLegal(NewCC, LL.getSimpleValueType()) && |
| TLI.isOperationLegal(ISD::SETCC, OpVT)))) |
| return DAG.getSetCC(DL, VT, LL, LR, NewCC); |
| } |
| |
| return SDValue(); |
| } |
| |
| static bool arebothOperandsNotSNan(SDValue Operand1, SDValue Operand2, |
| SelectionDAG &DAG) { |
| return DAG.isKnownNeverSNaN(Operand2) && DAG.isKnownNeverSNaN(Operand1); |
| } |
| |
| static bool arebothOperandsNotNan(SDValue Operand1, SDValue Operand2, |
| SelectionDAG &DAG) { |
| return DAG.isKnownNeverNaN(Operand2) && DAG.isKnownNeverNaN(Operand1); |
| } |
| |
| // FIXME: use FMINIMUMNUM if possible, such as for RISC-V. |
| static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2, |
| ISD::CondCode CC, unsigned OrAndOpcode, |
| SelectionDAG &DAG, |
| bool isFMAXNUMFMINNUM_IEEE, |
| bool isFMAXNUMFMINNUM) { |
| // The optimization cannot be applied for all the predicates because |
| // of the way FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle |
| // NaNs. For FMINNUM_IEEE/FMAXNUM_IEEE, the optimization cannot be |
| // applied at all if one of the operands is a signaling NaN. |
| |
| // It is safe to use FMINNUM_IEEE/FMAXNUM_IEEE if all the operands |
| // are non-NaN values. |
| if (((CC == ISD::SETLT || CC == ISD::SETLE) && (OrAndOpcode == ISD::OR)) || |
| ((CC == ISD::SETGT || CC == ISD::SETGE) && (OrAndOpcode == ISD::AND))) |
| return arebothOperandsNotNan(Operand1, Operand2, DAG) && |
| isFMAXNUMFMINNUM_IEEE |
| ? ISD::FMINNUM_IEEE |
| : ISD::DELETED_NODE; |
| else if (((CC == ISD::SETGT || CC == ISD::SETGE) && |
| (OrAndOpcode == ISD::OR)) || |
| ((CC == ISD::SETLT || CC == ISD::SETLE) && |
| (OrAndOpcode == ISD::AND))) |
| return arebothOperandsNotNan(Operand1, Operand2, DAG) && |
| isFMAXNUMFMINNUM_IEEE |
| ? ISD::FMAXNUM_IEEE |
| : ISD::DELETED_NODE; |
| // Both FMINNUM/FMAXNUM and FMINNUM_IEEE/FMAXNUM_IEEE handle quiet |
| // NaNs in the same way, but they differ for signaling NaNs. For the |
| // cases below, FMINNUM/FMAXNUM is always safe to use. FMINNUM_IEEE/ |
| // FMAXNUM_IEEE is only valid if we can additionally prove that neither |
| // operand is a signaling NaN. |
| else if (((CC == ISD::SETOLT || CC == ISD::SETOLE) && |
| (OrAndOpcode == ISD::OR)) || |
| ((CC == ISD::SETUGT || CC == ISD::SETUGE) && |
| (OrAndOpcode == ISD::AND))) |
| return isFMAXNUMFMINNUM ? ISD::FMINNUM |
| : arebothOperandsNotSNan(Operand1, Operand2, DAG) && |
| isFMAXNUMFMINNUM_IEEE |
| ? ISD::FMINNUM_IEEE |
| : ISD::DELETED_NODE; |
| else if (((CC == ISD::SETOGT || CC == ISD::SETOGE) && |
| (OrAndOpcode == ISD::OR)) || |
| ((CC == ISD::SETULT || CC == ISD::SETULE) && |
| (OrAndOpcode == ISD::AND))) |
| return isFMAXNUMFMINNUM ? ISD::FMAXNUM |
| : arebothOperandsNotSNan(Operand1, Operand2, DAG) && |
| isFMAXNUMFMINNUM_IEEE |
| ? ISD::FMAXNUM_IEEE |
| : ISD::DELETED_NODE; |
| return ISD::DELETED_NODE; |
| } |
| |
| static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) { |
| using AndOrSETCCFoldKind = TargetLowering::AndOrSETCCFoldKind; |
| assert( |
| (LogicOp->getOpcode() == ISD::AND || LogicOp->getOpcode() == ISD::OR) && |
| "Invalid Op to combine SETCC with"); |
| |
| // TODO: Search past casts/truncates. |
| SDValue LHS = LogicOp->getOperand(0); |
| SDValue RHS = LogicOp->getOperand(1); |
| if (LHS->getOpcode() != ISD::SETCC || RHS->getOpcode() != ISD::SETCC || |
| !LHS->hasOneUse() || !RHS->hasOneUse()) |
| return SDValue(); |
| |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| AndOrSETCCFoldKind TargetPreference = TLI.isDesirableToCombineLogicOpOfSETCC( |
| LogicOp, LHS.getNode(), RHS.getNode()); |
| |
| SDValue LHS0 = LHS->getOperand(0); |
| SDValue RHS0 = RHS->getOperand(0); |
| SDValue LHS1 = LHS->getOperand(1); |
| SDValue RHS1 = RHS->getOperand(1); |
| // TODO: We don't actually need a splat here, for vectors we just need the |
| // invariants to hold for each element. |
| auto *LHS1C = isConstOrConstSplat(LHS1); |
| auto *RHS1C = isConstOrConstSplat(RHS1); |
| ISD::CondCode CCL = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); |
| ISD::CondCode CCR = cast<CondCodeSDNode>(RHS.getOperand(2))->get(); |
| EVT VT = LogicOp->getValueType(0); |
| EVT OpVT = LHS0.getValueType(); |
| SDLoc DL(LogicOp); |
| |
| // Check if the operands of an and/or operation are comparisons and if they |
| // compare against the same value. If so, replace the and/or-cmp-cmp sequence |
| // with a min/max-cmp sequence. If LHS1 is equal to RHS1, the or-cmp-cmp |
| // sequence is replaced with a min-cmp sequence: |
| // (LHS0 < LHS1) | (RHS0 < RHS1) -> min(LHS0, RHS0) < LHS1 |
| // and the and-cmp-cmp sequence with a max-cmp sequence: |
| // (LHS0 < LHS1) & (RHS0 < RHS1) -> max(LHS0, RHS0) < LHS1 |
| // The optimization does not work for `==` or `!=`. |
| // The two comparisons must either use the same predicate, or the predicate |
| // of one comparison must be the swapped-operand form of the other. |
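| // For example: (X u< C) | (Y u< C) --> (umin(X, Y) u< C), and |
| // (X u< C) & (Y u< C) --> (umax(X, Y) u< C). |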
| bool isFMAXNUMFMINNUM_IEEE = TLI.isOperationLegal(ISD::FMAXNUM_IEEE, OpVT) && |
| TLI.isOperationLegal(ISD::FMINNUM_IEEE, OpVT); |
| bool isFMAXNUMFMINNUM = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, OpVT) && |
| TLI.isOperationLegalOrCustom(ISD::FMINNUM, OpVT); |
| if (((OpVT.isInteger() && TLI.isOperationLegal(ISD::UMAX, OpVT) && |
| TLI.isOperationLegal(ISD::SMAX, OpVT) && |
| TLI.isOperationLegal(ISD::UMIN, OpVT) && |
| TLI.isOperationLegal(ISD::SMIN, OpVT)) || |
| (OpVT.isFloatingPoint() && |
| (isFMAXNUMFMINNUM_IEEE || isFMAXNUMFMINNUM))) && |
| !ISD::isIntEqualitySetCC(CCL) && !ISD::isFPEqualitySetCC(CCL) && |
| CCL != ISD::SETFALSE && CCL != ISD::SETO && CCL != ISD::SETUO && |
| CCL != ISD::SETTRUE && |
| (CCL == CCR || CCL == ISD::getSetCCSwappedOperands(CCR))) { |
| |
| SDValue CommonValue, Operand1, Operand2; |
| ISD::CondCode CC = ISD::SETCC_INVALID; |
| if (CCL == CCR) { |
| if (LHS0 == RHS0) { |
| CommonValue = LHS0; |
| Operand1 = LHS1; |
| Operand2 = RHS1; |
| CC = ISD::getSetCCSwappedOperands(CCL); |
| } else if (LHS1 == RHS1) { |
| CommonValue = LHS1; |
| Operand1 = LHS0; |
| Operand2 = RHS0; |
| CC = CCL; |
| } |
| } else { |
| assert(CCL == ISD::getSetCCSwappedOperands(CCR) && "Unexpected CC"); |
| if (LHS0 == RHS1) { |
| CommonValue = LHS0; |
| Operand1 = LHS1; |
| Operand2 = RHS0; |
| CC = CCR; |
| } else if (RHS0 == LHS1) { |
| CommonValue = LHS1; |
| Operand1 = LHS0; |
| Operand2 = RHS1; |
| CC = CCL; |
| } |
| } |
| |
| // Don't do this transform for sign bit tests. Let foldLogicOfSetCCs |
| // handle it using OR/AND. |
| if (CC == ISD::SETLT && isNullOrNullSplat(CommonValue)) |
| CC = ISD::SETCC_INVALID; |
| else if (CC == ISD::SETGT && isAllOnesOrAllOnesSplat(CommonValue)) |
| CC = ISD::SETCC_INVALID; |
| |
| if (CC != ISD::SETCC_INVALID) { |
| unsigned NewOpcode = ISD::DELETED_NODE; |
| bool IsSigned = isSignedIntSetCC(CC); |
| if (OpVT.isInteger()) { |
| bool IsLess = (CC == ISD::SETLE || CC == ISD::SETULE || |
| CC == ISD::SETLT || CC == ISD::SETULT); |
| bool IsOr = (LogicOp->getOpcode() == ISD::OR); |
| if (IsLess == IsOr) |
| NewOpcode = IsSigned ? ISD::SMIN : ISD::UMIN; |
| else |
| NewOpcode = IsSigned ? ISD::SMAX : ISD::UMAX; |
| } else if (OpVT.isFloatingPoint()) |
| NewOpcode = |
| getMinMaxOpcodeForFP(Operand1, Operand2, CC, LogicOp->getOpcode(), |
| DAG, isFMAXNUMFMINNUM_IEEE, isFMAXNUMFMINNUM); |
| |
| if (NewOpcode != ISD::DELETED_NODE) { |
| SDValue MinMaxValue = |
| DAG.getNode(NewOpcode, DL, OpVT, Operand1, Operand2); |
| return DAG.getSetCC(DL, VT, MinMaxValue, CommonValue, CC); |
| } |
| } |
| } |
| |
| if (LHS0 == LHS1 && RHS0 == RHS1 && CCL == CCR && |
| LHS0.getValueType() == RHS0.getValueType() && |
| ((LogicOp->getOpcode() == ISD::AND && CCL == ISD::SETO) || |
| (LogicOp->getOpcode() == ISD::OR && CCL == ISD::SETUO))) |
| return DAG.getSetCC(DL, VT, LHS0, RHS0, CCL); |
| |
| if (TargetPreference == AndOrSETCCFoldKind::None) |
| return SDValue(); |
| |
| if (CCL == CCR && |
| CCL == (LogicOp->getOpcode() == ISD::AND ? ISD::SETNE : ISD::SETEQ) && |
| LHS0 == RHS0 && LHS1C && RHS1C && OpVT.isInteger()) { |
| const APInt &APLhs = LHS1C->getAPIntValue(); |
| const APInt &APRhs = RHS1C->getAPIntValue(); |
| |
| // Preference is to use ISD::ABS or we already have an ISD::ABS (in which |
| // case this is just a compare). |
| if (APLhs == (-APRhs) && |
| ((TargetPreference & AndOrSETCCFoldKind::ABS) || |
| DAG.doesNodeExist(ISD::ABS, DAG.getVTList(OpVT), {LHS0}))) { |
| const APInt &C = APLhs.isNegative() ? APRhs : APLhs; |
| // (icmp eq A, C) | (icmp eq A, -C) |
| // -> (icmp eq Abs(A), C) |
| // (icmp ne A, C) & (icmp ne A, -C) |
| // -> (icmp ne Abs(A), C) |
| SDValue AbsOp = DAG.getNode(ISD::ABS, DL, OpVT, LHS0); |
| return DAG.getNode(ISD::SETCC, DL, VT, AbsOp, |
| DAG.getConstant(C, DL, OpVT), LHS.getOperand(2)); |
| } else if (TargetPreference & |
| (AndOrSETCCFoldKind::AddAnd | AndOrSETCCFoldKind::NotAnd)) { |
| // AndOrSETCCFoldKind::AddAnd: |
| // A == C0 | A == C1 |
| // IF IsPow2(smax(C0, C1)-smin(C0, C1)) |
| // -> ((A - smin(C0, C1)) & ~(smax(C0, C1)-smin(C0, C1))) == 0 |
| // A != C0 & A != C1 |
| // IF IsPow2(smax(C0, C1)-smin(C0, C1)) |
| // -> ((A - smin(C0, C1)) & ~(smax(C0, C1)-smin(C0, C1))) != 0 |
| |
| // AndOrSETCCFoldKind::NotAnd: |
| // A == C0 | A == C1 |
| // IF smax(C0, C1) == -1 AND IsPow2(smax(C0, C1) - smin(C0, C1)) |
| // -> ~A & smin(C0, C1) == 0 |
| // A != C0 & A != C1 |
| // IF smax(C0, C1) == -1 AND IsPow2(smax(C0, C1) - smin(C0, C1)) |
| // -> ~A & smin(C0, C1) != 0 |
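| // |
| // For example (AddAnd), with C0 = 5 and C1 = 7 (difference 2): |
| //   A == 5 | A == 7 --> ((A - 5) & ~2) == 0 |
| // since (A - 5) & ~2 is zero only for A - 5 in {0, 2}. |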
| |
| const APInt &MaxC = APIntOps::smax(APRhs, APLhs); |
| const APInt &MinC = APIntOps::smin(APRhs, APLhs); |
| APInt Dif = MaxC - MinC; |
| if (!Dif.isZero() && Dif.isPowerOf2()) { |
| if (MaxC.isAllOnes() && |
| (TargetPreference & AndOrSETCCFoldKind::NotAnd)) { |
| SDValue NotOp = DAG.getNOT(DL, LHS0, OpVT); |
| SDValue AndOp = DAG.getNode(ISD::AND, DL, OpVT, NotOp, |
| DAG.getConstant(MinC, DL, OpVT)); |
| return DAG.getNode(ISD::SETCC, DL, VT, AndOp, |
| DAG.getConstant(0, DL, OpVT), LHS.getOperand(2)); |
| } else if (TargetPreference & AndOrSETCCFoldKind::AddAnd) { |
| SDValue AddOp = DAG.getNode(ISD::ADD, DL, OpVT, LHS0, |
| DAG.getConstant(-MinC, DL, OpVT)); |
| SDValue AndOp = DAG.getNode(ISD::AND, DL, OpVT, AddOp, |
| DAG.getConstant(~Dif, DL, OpVT)); |
| return DAG.getNode(ISD::SETCC, DL, VT, AndOp, |
| DAG.getConstant(0, DL, OpVT), LHS.getOperand(2)); |
| } |
| } |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| // Combine `(select c, (X & 1), 0)` -> `(and (zext c), X)`. |
| // We canonicalize to the `select` form in the middle end, but the `and` form |
| // gets better codegen, and all tested targets (arm, x86, riscv) prefer it. |
| static SDValue combineSelectAsExtAnd(SDValue Cond, SDValue T, SDValue F, |
| const SDLoc &DL, SelectionDAG &DAG) { |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| if (!isNullConstant(F)) |
| return SDValue(); |
| |
| EVT CondVT = Cond.getValueType(); |
| if (TLI.getBooleanContents(CondVT) != |
| TargetLoweringBase::ZeroOrOneBooleanContent) |
| return SDValue(); |
| |
| if (T.getOpcode() != ISD::AND) |
| return SDValue(); |
| |
| if (!isOneConstant(T.getOperand(1))) |
| return SDValue(); |
| |
| EVT OpVT = T.getValueType(); |
| |
| SDValue CondMask = |
| OpVT == CondVT ? Cond : DAG.getBoolExtOrTrunc(Cond, DL, OpVT, CondVT); |
| return DAG.getNode(ISD::AND, DL, OpVT, CondMask, T.getOperand(0)); |
| } |
| |
| /// This contains all DAGCombine rules which reduce two values combined by |
| /// an And operation to a single value. This makes them reusable in the context |
| /// of visitSELECT(). Rules involving constants are not included as |
| /// visitSELECT() already handles those cases. |
| SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) { |
| EVT VT = N1.getValueType(); |
| SDLoc DL(N); |
| |
| // fold (and x, undef) -> 0 |
| if (N0.isUndef() || N1.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL)) |
| return V; |
| |
| // Canonicalize: |
| // and(x, add) -> and(add, x) |
| if (N1.getOpcode() == ISD::ADD) |
| std::swap(N0, N1); |
| |
| // TODO: Rewrite this to return a new 'AND' instead of using CombineTo. |
| if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && |
| VT.isScalarInteger() && VT.getSizeInBits() <= 64 && N0->hasOneUse()) { |
| if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { |
| if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { |
| // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal |
| // immediate for an add, but it is legal if its top c2 bits are set, |
| // transform the ADD so the immediate doesn't need to be materialized |
| // in a register. |
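| // For example (illustrative, assuming a target with 12-bit add |
| // immediates such as RISC-V): with c1 = 0xFFFF and c2 = 48, the AND |
| // already clears the top 48 bits of the result, so c1 can be widened |
| // to -1, which is a legal add immediate. |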
| APInt ADDC = ADDI->getAPIntValue(); |
| APInt SRLC = SRLI->getAPIntValue(); |
| if (ADDC.getSignificantBits() <= 64 && SRLC.ult(VT.getSizeInBits()) && |
| !TLI.isLegalAddImmediate(ADDC.getSExtValue())) { |
| APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), |
| SRLC.getZExtValue()); |
| if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) { |
| ADDC |= Mask; |
| if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) { |
| SDLoc DL0(N0); |
| SDValue NewAdd = |
| DAG.getNode(ISD::ADD, DL0, VT, |
| N0.getOperand(0), DAG.getConstant(ADDC, DL, VT)); |
| CombineTo(N0.getNode(), NewAdd); |
| // Return N so it doesn't get rechecked! |
| return SDValue(N, 0); |
| } |
| } |
| } |
| } |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN, |
| EVT LoadResultTy, EVT &ExtVT) { |
| if (!AndC->getAPIntValue().isMask()) |
| return false; |
| |
| unsigned ActiveBits = AndC->getAPIntValue().countr_one(); |
| |
| ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); |
| EVT LoadedVT = LoadN->getMemoryVT(); |
| |
| if (ExtVT == LoadedVT && |
| (!LegalOperations || |
| TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) { |
| // ZEXTLOAD will match without needing to change the size of the value being |
| // loaded. |
| return true; |
| } |
| |
| // Do not change the width of volatile or atomic loads. |
| if (!LoadN->isSimple()) |
| return false; |
| |
| // Do not generate loads of non-round integer types since these can |
| // be expensive (and would be wrong if the type is not byte sized). |
| if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound()) |
| return false; |
| |
| if (LegalOperations && |
| !TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT)) |
| return false; |
| |
| if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT, /*ByteOffset=*/0)) |
| return false; |
| |
| return true; |
| } |
| |
| bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST, |
| ISD::LoadExtType ExtType, EVT &MemVT, |
| unsigned ShAmt) { |
| if (!LDST) |
| return false; |
| |
| // Only allow byte offsets. |
| if (ShAmt % 8) |
| return false; |
| const unsigned ByteShAmt = ShAmt / 8; |
| |
| // Do not generate loads of non-round integer types since these can |
| // be expensive (and would be wrong if the type is not byte sized). |
| if (!MemVT.isRound()) |
| return false; |
| |
| // Don't change the width of volatile or atomic loads. |
| if (!LDST->isSimple()) |
| return false; |
| |
| EVT LdStMemVT = LDST->getMemoryVT(); |
| |
| // Bail out when changing the scalable property, since we can't be sure that |
| // we're actually narrowing here. |
| if (LdStMemVT.isScalableVector() != MemVT.isScalableVector()) |
| return false; |
| |
| // Verify that we are actually reducing a load width here. |
| if (LdStMemVT.bitsLT(MemVT)) |
| return false; |
| |
| // Ensure that this isn't going to produce an unsupported memory access. |
| if (ShAmt) { |
| const Align LDSTAlign = LDST->getAlign(); |
| const Align NarrowAlign = commonAlignment(LDSTAlign, ByteShAmt); |
| if (!TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, |
| LDST->getAddressSpace(), NarrowAlign, |
| LDST->getMemOperand()->getFlags())) |
| return false; |
| } |
| |
| // It's not possible to generate a constant of extended or untyped type. |
| EVT PtrType = LDST->getBasePtr().getValueType(); |
| if (PtrType == MVT::Untyped || PtrType.isExtended()) |
| return false; |
| |
| if (isa<LoadSDNode>(LDST)) { |
| LoadSDNode *Load = cast<LoadSDNode>(LDST); |
| // Don't transform one with multiple uses; this would require adding a new |
| // load. |
| if (!SDValue(Load, 0).hasOneUse()) |
| return false; |
| |
| if (LegalOperations && |
| !TLI.isLoadExtLegal(ExtType, Load->getValueType(0), MemVT)) |
| return false; |
| |
| // For the transform to be legal, the load must produce only two values |
| // (the value loaded and the chain). Don't transform a pre-increment |
| // load, for example, which produces an extra value. Otherwise the |
| // transformation is not equivalent, and the downstream logic to replace |
| // uses gets things wrong. |
| if (Load->getNumValues() > 2) |
| return false; |
| |
| // If the load that we're shrinking is an extload and we're not just |
| // discarding the extension, we can't simply shrink the load. Bail. |
| // TODO: It would be possible to merge the extensions in some cases. |
| if (Load->getExtensionType() != ISD::NON_EXTLOAD && |
| Load->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt) |
| return false; |
| |
| if (!TLI.shouldReduceLoadWidth(Load, ExtType, MemVT, ByteShAmt)) |
| return false; |
| } else { |
| assert(isa<StoreSDNode>(LDST) && "It is not a Load nor a Store SDNode"); |
| StoreSDNode *Store = cast<StoreSDNode>(LDST); |
| // Can't write outside the original store |
| if (Store->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits() + ShAmt) |
| return false; |
| |
| if (LegalOperations && |
| !TLI.isTruncStoreLegal(Store->getValue().getValueType(), MemVT)) |
| return false; |
| } |
| return true; |
| } |
| |
| bool DAGCombiner::SearchForAndLoads(SDNode *N, |
| SmallVectorImpl<LoadSDNode*> &Loads, |
| SmallPtrSetImpl<SDNode*> &NodesWithConsts, |
| ConstantSDNode *Mask, |
| SDNode *&NodeToMask) { |
| // Recursively search the operands, looking for loads which can be |
| // narrowed. |
| for (SDValue Op : N->op_values()) { |
| if (Op.getValueType().isVector()) |
| return false; |
| |
| // Some constants may need fixing up later if they are too large. |
| if (auto *C = dyn_cast<ConstantSDNode>(Op)) { |
| assert(ISD::isBitwiseLogicOp(N->getOpcode()) && |
| "Expected bitwise logic operation"); |
| if (!C->getAPIntValue().isSubsetOf(Mask->getAPIntValue())) |
| NodesWithConsts.insert(N); |
| continue; |
| } |
| |
| if (!Op.hasOneUse()) |
| return false; |
| |
| switch(Op.getOpcode()) { |
| case ISD::LOAD: { |
| auto *Load = cast<LoadSDNode>(Op); |
| EVT ExtVT; |
| if (isAndLoadExtLoad(Mask, Load, Load->getValueType(0), ExtVT) && |
| isLegalNarrowLdSt(Load, ISD::ZEXTLOAD, ExtVT)) { |
| |
| // ZEXTLOAD is already small enough. |
| if (Load->getExtensionType() == ISD::ZEXTLOAD && |
| ExtVT.bitsGE(Load->getMemoryVT())) |
| continue; |
| |
| // Use bitsLE so that equal-sized loads are also converted to zext. |
| if (ExtVT.bitsLE(Load->getMemoryVT())) |
| Loads.push_back(Load); |
| |
| continue; |
| } |
| return false; |
| } |
| case ISD::ZERO_EXTEND: |
| case ISD::AssertZext: { |
| unsigned ActiveBits = Mask->getAPIntValue().countr_one(); |
| EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); |
| EVT VT = Op.getOpcode() == ISD::AssertZext ? |
| cast<VTSDNode>(Op.getOperand(1))->getVT() : |
| Op.getOperand(0).getValueType(); |
| |
| // We can accept extending nodes if the mask is wider than or equal |
| // in width to the original type. |
| if (ExtVT.bitsGE(VT)) |
| continue; |
| break; |
| } |
| case ISD::OR: |
| case ISD::XOR: |
| case ISD::AND: |
| if (!SearchForAndLoads(Op.getNode(), Loads, NodesWithConsts, Mask, |
| NodeToMask)) |
| return false; |
| continue; |
| } |
| |
| // Allow one node which will be masked along with any loads found. |
| if (NodeToMask) |
| return false; |
| |
| // Also ensure that the node to be masked only produces one data result. |
| NodeToMask = Op.getNode(); |
| if (NodeToMask->getNumValues() > 1) { |
| bool HasValue = false; |
| for (unsigned i = 0, e = NodeToMask->getNumValues(); i < e; ++i) { |
| MVT VT = SDValue(NodeToMask, i).getSimpleValueType(); |
| if (VT != MVT::Glue && VT != MVT::Other) { |
| if (HasValue) { |
| NodeToMask = nullptr; |
| return false; |
| } |
| HasValue = true; |
| } |
| } |
| assert(HasValue && "Node to be masked has no data result?"); |
| } |
| } |
| return true; |
| } |
| |
| bool DAGCombiner::BackwardsPropagateMask(SDNode *N) { |
| auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1)); |
| if (!Mask) |
| return false; |
| |
| if (!Mask->getAPIntValue().isMask()) |
| return false; |
| |
| // No need to do anything if the and directly uses a load. |
| if (isa<LoadSDNode>(N->getOperand(0))) |
| return false; |
| |
| SmallVector<LoadSDNode*, 8> Loads; |
| SmallPtrSet<SDNode*, 2> NodesWithConsts; |
| SDNode *FixupNode = nullptr; |
| if (SearchForAndLoads(N, Loads, NodesWithConsts, Mask, FixupNode)) { |
| if (Loads.empty()) |
| return false; |
| |
| LLVM_DEBUG(dbgs() << "Backwards propagate AND: "; N->dump()); |
| SDValue MaskOp = N->getOperand(1); |
| |
| // If it exists, fixup the single node we allow in the tree that needs |
| // masking. |
| if (FixupNode) { |
| LLVM_DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump()); |
| SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode), |
| FixupNode->getValueType(0), |
| SDValue(FixupNode, 0), MaskOp); |
| DAG.ReplaceAllUsesOfValueWith(SDValue(FixupNode, 0), And); |
| if (And.getOpcode() == ISD::AND) |
| DAG.UpdateNodeOperands(And.getNode(), SDValue(FixupNode, 0), MaskOp); |
| } |
| |
| // Narrow any constants that need it. |
| for (auto *LogicN : NodesWithConsts) { |
| SDValue Op0 = LogicN->getOperand(0); |
| SDValue Op1 = LogicN->getOperand(1); |
| |
| // We only need to fix AND if both inputs are constants. And we only need |
| // to fix one of the constants. |
| if (LogicN->getOpcode() == ISD::AND && |
| (!isa<ConstantSDNode>(Op0) || !isa<ConstantSDNode>(Op1))) |
| continue; |
| |
| if (isa<ConstantSDNode>(Op0) && LogicN->getOpcode() != ISD::AND) |
| Op0 = |
| DAG.getNode(ISD::AND, SDLoc(Op0), Op0.getValueType(), Op0, MaskOp); |
| |
| if (isa<ConstantSDNode>(Op1)) |
| Op1 = |
| DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(), Op1, MaskOp); |
| |
| if (isa<ConstantSDNode>(Op0) && !isa<ConstantSDNode>(Op1)) |
| std::swap(Op0, Op1); |
| |
| DAG.UpdateNodeOperands(LogicN, Op0, Op1); |
| } |
| |
| // Create narrow loads. |
| for (auto *Load : Loads) { |
| LLVM_DEBUG(dbgs() << "Propagate AND back to: "; Load->dump()); |
| SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0), |
| SDValue(Load, 0), MaskOp); |
| DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And); |
| if (And.getOpcode() == ISD::AND) |
| And = SDValue( |
| DAG.UpdateNodeOperands(And.getNode(), SDValue(Load, 0), MaskOp), 0); |
| SDValue NewLoad = reduceLoadWidth(And.getNode()); |
| assert(NewLoad && |
| "Shouldn't be masking the load if it can't be narrowed"); |
| CombineTo(Load, NewLoad, NewLoad.getValue(1)); |
| } |
| DAG.ReplaceAllUsesWith(N, N->getOperand(0).getNode()); |
| return true; |
| } |
| return false; |
| } |
| |
| // Unfold |
| // x & (-1 'logical shift' y) |
| // To |
| // (x 'opposite logical shift' y) 'logical shift' y |
| // if it is better for performance. |
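| // |
| // For example, x & (-1 << y) clears the low y bits of x, which is exactly |
| // what (x >> y) << y computes, without materializing the all-ones mask. |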
| SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) { |
| assert(N->getOpcode() == ISD::AND); |
| |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| |
| // Do we actually prefer shifts over mask? |
| if (!TLI.shouldFoldMaskToVariableShiftPair(N0)) |
| return SDValue(); |
| |
| // Try to match (-1 '[outer] logical shift' y) |
| unsigned OuterShift; |
| unsigned InnerShift; // The opposite direction to the OuterShift. |
| SDValue Y; // Shift amount. |
| auto matchMask = [&OuterShift, &InnerShift, &Y](SDValue M) -> bool { |
| if (!M.hasOneUse()) |
| return false; |
| OuterShift = M->getOpcode(); |
| if (OuterShift == ISD::SHL) |
| InnerShift = ISD::SRL; |
| else if (OuterShift == ISD::SRL) |
| InnerShift = ISD::SHL; |
| else |
| return false; |
| if (!isAllOnesConstant(M->getOperand(0))) |
| return false; |
| Y = M->getOperand(1); |
| return true; |
| }; |
| |
| SDValue X; |
| if (matchMask(N1)) |
| X = N0; |
| else if (matchMask(N0)) |
| X = N1; |
| else |
| return SDValue(); |
| |
| SDLoc DL(N); |
| EVT VT = N->getValueType(0); |
| |
| // tmp = x 'opposite logical shift' y |
| SDValue T0 = DAG.getNode(InnerShift, DL, VT, X, Y); |
| // ret = tmp 'logical shift' y |
| SDValue T1 = DAG.getNode(OuterShift, DL, VT, T0, Y); |
| |
| return T1; |
| } |
| |
| /// Try to replace shift/logic that tests if a bit is clear with mask + setcc. |
| /// For a target with a bit test, this is expected to become test + set and save |
| /// at least 1 instruction. |
| static SDValue combineShiftAnd1ToBitTest(SDNode *And, SelectionDAG &DAG) { |
| assert(And->getOpcode() == ISD::AND && "Expected an 'and' op"); |
| |
| // Look through an optional extension. |
| SDValue And0 = And->getOperand(0), And1 = And->getOperand(1); |
| if (And0.getOpcode() == ISD::ANY_EXTEND && And0.hasOneUse()) |
| And0 = And0.getOperand(0); |
| if (!isOneConstant(And1) || !And0.hasOneUse()) |
| return SDValue(); |
| |
| SDValue Src = And0; |
| |
| // Attempt to find a 'not' op. |
| // TODO: Should we favor test+set even without the 'not' op? |
| bool FoundNot = false; |
| if (isBitwiseNot(Src)) { |
| FoundNot = true; |
| Src = Src.getOperand(0); |
| |
| // Look through an optional truncation. The source operand may not be the |
| // same type as the original 'and', but that is ok because we are masking |
| // off everything but the low bit. |
| if (Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse()) |
| Src = Src.getOperand(0); |
| } |
| |
| // Match a shift-right by constant. |
| if (Src.getOpcode() != ISD::SRL || !Src.hasOneUse()) |
| return SDValue(); |
| |
| // This is probably not worthwhile without a supported type. |
| EVT SrcVT = Src.getValueType(); |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| if (!TLI.isTypeLegal(SrcVT)) |
| return SDValue(); |
| |
| // We might have looked through casts that make this transform invalid. |
| unsigned BitWidth = SrcVT.getScalarSizeInBits(); |
| SDValue ShiftAmt = Src.getOperand(1); |
| auto *ShiftAmtC = dyn_cast<ConstantSDNode>(ShiftAmt); |
| if (!ShiftAmtC || !ShiftAmtC->getAPIntValue().ult(BitWidth)) |
| return SDValue(); |
| |
| // Set source to shift source. |
| Src = Src.getOperand(0); |
| |
| // Try again to find a 'not' op. |
| // TODO: Should we favor test+set even with two 'not' ops? |
| if (!FoundNot) { |
| if (!isBitwiseNot(Src)) |
| return SDValue(); |
| Src = Src.getOperand(0); |
| } |
| |
| if (!TLI.hasBitTest(Src, ShiftAmt)) |
| return SDValue(); |
| |
| // Turn this into a bit-test pattern using mask op + setcc: |
| // and (not (srl X, C)), 1 --> (and X, 1<<C) == 0 |
| // and (srl (not X), C), 1 --> (and X, 1<<C) == 0 |
| SDLoc DL(And); |
| SDValue X = DAG.getZExtOrTrunc(Src, DL, SrcVT); |
| EVT CCVT = |
| TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT); |
| SDValue Mask = DAG.getConstant( |
| APInt::getOneBitSet(BitWidth, ShiftAmtC->getZExtValue()), DL, SrcVT); |
| SDValue NewAnd = DAG.getNode(ISD::AND, DL, SrcVT, X, Mask); |
| SDValue Zero = DAG.getConstant(0, DL, SrcVT); |
| SDValue Setcc = DAG.getSetCC(DL, CCVT, NewAnd, Zero, ISD::SETEQ); |
| return DAG.getZExtOrTrunc(Setcc, DL, And->getValueType(0)); |
| } |
| |
| /// For targets that support usubsat, match a bit-hack form of that operation |
| /// that ends in 'and' and convert it. |
| static SDValue foldAndToUsubsat(SDNode *N, SelectionDAG &DAG, const SDLoc &DL) { |
| EVT VT = N->getValueType(0); |
| unsigned BitWidth = VT.getScalarSizeInBits(); |
| APInt SignMask = APInt::getSignMask(BitWidth); |
| |
| // (i8 X ^ 128) & (i8 X s>> 7) --> usubsat X, 128 |
| // (i8 X + 128) & (i8 X s>> 7) --> usubsat X, 128 |
| // xor/add with SMIN (signmask) are logically equivalent. |
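| // |
| // For example, with i8 X = 200 (0xC8): |
| //   X ^ 128 = 72, X s>> 7 = 0xFF, so the 'and' yields 72 == usubsat(200, 128) |
| // With X = 100: X ^ 128 = 228, X s>> 7 = 0, so the 'and' yields 0. |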
| SDValue X; |
| if (!sd_match(N, m_And(m_OneUse(m_Xor(m_Value(X), m_SpecificInt(SignMask))), |
| m_OneUse(m_Sra(m_Deferred(X), |
| m_SpecificInt(BitWidth - 1))))) && |
| !sd_match(N, m_And(m_OneUse(m_Add(m_Value(X), m_SpecificInt(SignMask))), |
| m_OneUse(m_Sra(m_Deferred(X), |
| m_SpecificInt(BitWidth - 1)))))) |
| return SDValue(); |
| |
| return DAG.getNode(ISD::USUBSAT, DL, VT, X, |
| DAG.getConstant(SignMask, DL, VT)); |
| } |
| |
| /// Given a bitwise logic operation N with a matching bitwise logic operand, |
| /// fold a pattern where 2 of the source operands are identically shifted |
| /// values. For example: |
| /// ((X0 << Y) | Z) | (X1 << Y) --> ((X0 | X1) << Y) | Z |
| static SDValue foldLogicOfShifts(SDNode *N, SDValue LogicOp, SDValue ShiftOp, |
| SelectionDAG &DAG) { |
| unsigned LogicOpcode = N->getOpcode(); |
| assert(ISD::isBitwiseLogicOp(LogicOpcode) && |
| "Expected bitwise logic operation"); |
| |
| if (!LogicOp.hasOneUse() || !ShiftOp.hasOneUse()) |
| return SDValue(); |
| |
| // Match another bitwise logic op and a shift. |
| unsigned ShiftOpcode = ShiftOp.getOpcode(); |
| if (LogicOp.getOpcode() != LogicOpcode || |
| !(ShiftOpcode == ISD::SHL || ShiftOpcode == ISD::SRL || |
| ShiftOpcode == ISD::SRA)) |
| return SDValue(); |
| |
| // Match another shift op inside the first logic operand. Handle both commuted |
| // possibilities. |
| // LOGIC (LOGIC (SH X0, Y), Z), (SH X1, Y) --> LOGIC (SH (LOGIC X0, X1), Y), Z |
| // LOGIC (LOGIC Z, (SH X0, Y)), (SH X1, Y) --> LOGIC (SH (LOGIC X0, X1), Y), Z |
| SDValue X1 = ShiftOp.getOperand(0); |
| SDValue Y = ShiftOp.getOperand(1); |
| SDValue X0, Z; |
| if (LogicOp.getOperand(0).getOpcode() == ShiftOpcode && |
| LogicOp.getOperand(0).getOperand(1) == Y) { |
| X0 = LogicOp.getOperand(0).getOperand(0); |
| Z = LogicOp.getOperand(1); |
| } else if (LogicOp.getOperand(1).getOpcode() == ShiftOpcode && |
| LogicOp.getOperand(1).getOperand(1) == Y) { |
| X0 = LogicOp.getOperand(1).getOperand(0); |
| Z = LogicOp.getOperand(0); |
| } else { |
| return SDValue(); |
| } |
| |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| SDValue LogicX = DAG.getNode(LogicOpcode, DL, VT, X0, X1); |
| SDValue NewShift = DAG.getNode(ShiftOpcode, DL, VT, LogicX, Y); |
| return DAG.getNode(LogicOpcode, DL, VT, NewShift, Z); |
| } |
| |
| /// Given a tree of logic operations with shape like |
| /// (LOGIC (LOGIC (X, Y), LOGIC (Z, Y))) |
| /// try to match and fold shift operations with the same shift amount. |
| /// For example: |
| /// LOGIC (LOGIC (SH X0, Y), Z), (LOGIC (SH X1, Y), W) --> |
| /// --> LOGIC (SH (LOGIC X0, X1), Y), (LOGIC Z, W) |
| static SDValue foldLogicTreeOfShifts(SDNode *N, SDValue LeftHand, |
| SDValue RightHand, SelectionDAG &DAG) { |
| unsigned LogicOpcode = N->getOpcode(); |
| assert(ISD::isBitwiseLogicOp(LogicOpcode) && |
| "Expected bitwise logic operation"); |
| if (LeftHand.getOpcode() != LogicOpcode || |
| RightHand.getOpcode() != LogicOpcode) |
| return SDValue(); |
| if (!LeftHand.hasOneUse() || !RightHand.hasOneUse()) |
| return SDValue(); |
| |
| // Try to match one of following patterns: |
| // LOGIC (LOGIC (SH X0, Y), Z), (LOGIC (SH X1, Y), W) |
| // LOGIC (LOGIC (SH X0, Y), Z), (LOGIC W, (SH X1, Y)) |
| // Note that foldLogicOfShifts will handle commuted versions of the left hand |
| // itself. |
| SDValue CombinedShifts, W; |
| SDValue R0 = RightHand.getOperand(0); |
| SDValue R1 = RightHand.getOperand(1); |
| if ((CombinedShifts = foldLogicOfShifts(N, LeftHand, R0, DAG))) |
| W = R1; |
| else if ((CombinedShifts = foldLogicOfShifts(N, LeftHand, R1, DAG))) |
| W = R0; |
| else |
| return SDValue(); |
| |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| return DAG.getNode(LogicOpcode, DL, VT, CombinedShifts, W); |
| } |
| |
| /// Fold "masked merge" expressions like `(m & x) | (~m & y)` and its DeMorgan |
| /// variant `(~m | x) & (m | y)` into the equivalent `((x ^ y) & m) ^ y` |
| /// pattern. This is typically a better representation for targets without a |
| /// fused "and-not" operation. |
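| /// For example, with i8 m = 0xF0, x = 0xAA, y = 0x55: |
| ///   (m & x) | (~m & y) = 0xA0 | 0x05 = 0xA5 |
| ///   ((x ^ y) & m) ^ y  = (0xFF & 0xF0) ^ 0x55 = 0xA5 |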
| static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG, |
| const TargetLowering &TLI, const SDLoc &DL) { |
| // Note that masked-merge variants using XOR or ADD expressions are |
| // normalized to OR by InstCombine so we only check for OR or AND. |
| assert((Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::AND) && |
| "Must be called with ISD::OR or ISD::AND node"); |
| |
| // If the target supports and-not, don't fold this. |
| if (TLI.hasAndNot(SDValue(Node, 0))) |
| return SDValue(); |
| |
| SDValue M, X, Y; |
| |
| if (sd_match(Node, |
| m_Or(m_OneUse(m_And(m_OneUse(m_Not(m_Value(M))), m_Value(Y))), |
| m_OneUse(m_And(m_Deferred(M), m_Value(X))))) || |
| sd_match(Node, |
| m_And(m_OneUse(m_Or(m_OneUse(m_Not(m_Value(M))), m_Value(X))), |
| m_OneUse(m_Or(m_Deferred(M), m_Value(Y)))))) { |
| EVT VT = M.getValueType(); |
| SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, Y); |
| SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor, M); |
| return DAG.getNode(ISD::XOR, DL, VT, And, Y); |
| } |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitAND(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N1.getValueType(); |
| SDLoc DL(N); |
| |
| // x & x --> x |
| if (N0 == N1) |
| return N0; |
| |
| // fold (and c1, c2) -> c1&c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::AND, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::AND, DL, VT, N1, N0); |
| |
| if (areBitwiseNotOfEachother(N0, N1)) |
| return DAG.getConstant(APInt::getZero(VT.getScalarSizeInBits()), DL, VT); |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (and x, 0) -> 0, vector edition |
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| // Do not return N1, because an undef node may exist in N1 |
| return DAG.getConstant(APInt::getZero(N1.getScalarValueSizeInBits()), DL, |
| N1.getValueType()); |
| |
| // fold (and x, -1) -> x, vector edition |
| if (ISD::isConstantSplatVectorAllOnes(N1.getNode())) |
| return N0; |
| |
| // fold (and (masked_load) (splat_vec (x, ...))) to zext_masked_load |
| auto *MLoad = dyn_cast<MaskedLoadSDNode>(N0); |
| ConstantSDNode *Splat = isConstOrConstSplat(N1, true, true); |
| if (MLoad && MLoad->getExtensionType() == ISD::EXTLOAD && Splat) { |
| EVT LoadVT = MLoad->getMemoryVT(); |
| EVT ExtVT = VT; |
| if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT, LoadVT)) { |
| // For this AND to be a zero extension of the masked load, the elements |
| // of the BuildVec must mask the bottom bits of the extended element |
| // type. |
| uint64_t ElementSize = |
| LoadVT.getVectorElementType().getScalarSizeInBits(); |
| if (Splat->getAPIntValue().isMask(ElementSize)) { |
| SDValue NewLoad = DAG.getMaskedLoad( |
| ExtVT, DL, MLoad->getChain(), MLoad->getBasePtr(), |
| MLoad->getOffset(), MLoad->getMask(), MLoad->getPassThru(), |
| LoadVT, MLoad->getMemOperand(), MLoad->getAddressingMode(), |
| ISD::ZEXTLOAD, MLoad->isExpandingLoad()); |
| bool LoadHasOtherUsers = !N0.hasOneUse(); |
| CombineTo(N, NewLoad); |
| if (LoadHasOtherUsers) |
| CombineTo(MLoad, NewLoad.getValue(0), NewLoad.getValue(1)); |
| return SDValue(N, 0); |
| } |
| } |
| } |
| } |
| |
| // fold (and x, -1) -> x |
| if (isAllOnesConstant(N1)) |
| return N0; |
| |
| // if (and x, c) is known to be zero, return 0 |
| unsigned BitWidth = VT.getScalarSizeInBits(); |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnes(BitWidth))) |
| return DAG.getConstant(0, DL, VT); |
| |
| if (SDValue R = foldAndOrOfSETCC(N, DAG)) |
| return R; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // reassociate and |
| if (SDValue RAND = reassociateOps(ISD::AND, DL, N0, N1, N->getFlags())) |
| return RAND; |
| |
| // Fold and(vecreduce(x), vecreduce(y)) -> vecreduce(and(x, y)) |
| if (SDValue SD = |
| reassociateReduction(ISD::VECREDUCE_AND, ISD::AND, DL, VT, N0, N1)) |
| return SD; |
| |
| // fold (and (or x, C), D) -> D if (C & D) == D |
| auto MatchSubset = [](ConstantSDNode *LHS, ConstantSDNode *RHS) { |
| return RHS->getAPIntValue().isSubsetOf(LHS->getAPIntValue()); |
| }; |
| if (N0.getOpcode() == ISD::OR && |
| ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchSubset)) |
| return N1; |
| |
| if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { |
| SDValue N0Op0 = N0.getOperand(0); |
| EVT SrcVT = N0Op0.getValueType(); |
| unsigned SrcBitWidth = SrcVT.getScalarSizeInBits(); |
| APInt Mask = ~N1C->getAPIntValue(); |
| Mask = Mask.trunc(SrcBitWidth); |
| |
| // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. |
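| // For example: (and (any_extend i8 V to i32), 0xFF) |
| //          --> (zero_extend i8 V to i32) |
| // since the mask keeps only bits that come from V itself. |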
| if (DAG.MaskedValueIsZero(N0Op0, Mask)) |
| return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0Op0); |
| |
| // fold (and (any_ext V), c) -> (zero_ext (and (trunc V), c)) if profitable. |
| if (N1C->getAPIntValue().countLeadingZeros() >= (BitWidth - SrcBitWidth) && |
| TLI.isTruncateFree(VT, SrcVT) && TLI.isZExtFree(SrcVT, VT) && |
| TLI.isTypeDesirableForOp(ISD::AND, SrcVT) && |
| TLI.isNarrowingProfitable(N, VT, SrcVT)) |
| return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, |
| DAG.getNode(ISD::AND, DL, SrcVT, N0Op0, |
| DAG.getZExtOrTrunc(N1, DL, SrcVT))); |
| } |
| |
| // fold (and (ext (and V, c1)), c2) -> (and (ext V), (and c1, (ext c2))) |
| if (ISD::isExtOpcode(N0.getOpcode())) { |
| unsigned ExtOpc = N0.getOpcode(); |
| SDValue N0Op0 = N0.getOperand(0); |
| if (N0Op0.getOpcode() == ISD::AND && |
| (ExtOpc != ISD::ZERO_EXTEND || !TLI.isZExtFree(N0Op0, VT)) && |
| N0->hasOneUse() && N0Op0->hasOneUse()) { |
| if (SDValue NewExt = DAG.FoldConstantArithmetic(ExtOpc, DL, VT, |
| {N0Op0.getOperand(1)})) { |
| if (SDValue NewMask = |
| DAG.FoldConstantArithmetic(ISD::AND, DL, VT, {N1, NewExt})) { |
| return DAG.getNode(ISD::AND, DL, VT, |
| DAG.getNode(ExtOpc, DL, VT, N0Op0.getOperand(0)), |
| NewMask); |
| } |
| } |
| } |
| } |
| |
| // Similarly, fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) -> |
| // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must |
| // already be zero by virtue of the width of the base type of the load. |
| // |
| // The 'X' node here can either be nothing or an extract_vector_elt to catch |
| // more cases. |
| if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && |
| N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() && |
| N0.getOperand(0).getOpcode() == ISD::LOAD && |
| N0.getOperand(0).getResNo() == 0) || |
| (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) { |
| auto *Load = |
| cast<LoadSDNode>((N0.getOpcode() == ISD::LOAD) ? N0 : N0.getOperand(0)); |
| |
| // Get the constant (if applicable) the zero'th operand is being ANDed with. |
| // This can be a pure constant or a vector splat, in which case we treat the |
| // vector as a scalar and use the splat value. |
| APInt Constant = APInt::getZero(1); |
| if (const ConstantSDNode *C = isConstOrConstSplat( |
| N1, /*AllowUndef=*/false, /*AllowTruncation=*/true)) { |
| Constant = C->getAPIntValue(); |
| } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { |
| unsigned EltBitWidth = Vector->getValueType(0).getScalarSizeInBits(); |
| APInt SplatValue, SplatUndef; |
| unsigned SplatBitSize; |
| bool HasAnyUndefs; |
| // Endianness should not matter here. Code below makes sure that we only |
| // use the result if the SplatBitSize is a multiple of the vector element |
| // size. And after that we AND all element sized parts of the splat |
| // together. So the end result should be the same regardless of in which |
| // order we do those operations. |
| const bool IsBigEndian = false; |
| bool IsSplat = |
| Vector->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, |
| HasAnyUndefs, EltBitWidth, IsBigEndian); |
| |
| // Make sure that variable 'Constant' is only set if 'SplatBitSize' is a |
| // multiple of 'EltBitWidth'. Otherwise, we could propagate a wrong value. |
| if (IsSplat && (SplatBitSize % EltBitWidth) == 0) { |
| // Undef bits can contribute to a possible optimisation if set, so |
| // set them. |
| SplatValue |= SplatUndef; |
| |
| // The splat value may be something like "0x00FFFFFF", which means 0 for |
| // the first vector value and FF for the rest, repeating. We need a mask |
| // that will apply equally to all members of the vector, so AND all the |
| // lanes of the constant together. |
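| // For example, a v4i8 build vector with the 16-bit splat value 0x00FF |
| // yields Constant = 0xFF & 0x00 = 0x00, the only mask valid for every |
| // i8 lane. |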
| Constant = APInt::getAllOnes(EltBitWidth); |
| for (unsigned i = 0, n = (SplatBitSize / EltBitWidth); i < n; ++i) |
| Constant &= SplatValue.extractBits(EltBitWidth, i * EltBitWidth); |
| } |
| } |
| |
| // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is |
| // actually legal and isn't going to get expanded, else this is a false |
| // optimisation. |
| bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD, |
| Load->getValueType(0), |
| Load->getMemoryVT()); |
| |
| // Resize the constant to the same size as the original memory access before |
| // extension. If it is still the AllOnesValue then this AND is completely |
| // unneeded. |
| Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits()); |
| |
| bool B; |
| switch (Load->getExtensionType()) { |
| default: B = false; break; |
| case ISD::EXTLOAD: B = CanZextLoadProfitably; break; |
| case ISD::ZEXTLOAD: |
| case ISD::NON_EXTLOAD: B = true; break; |
| } |
| |
| if (B && Constant.isAllOnes()) { |
| // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to |
| // preserve semantics once we get rid of the AND. |
| SDValue NewLoad(Load, 0); |
| |
| // Fold the AND away. NewLoad may get replaced immediately. |
| CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0); |
| |
| if (Load->getExtensionType() == ISD::EXTLOAD) { |
| NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD, |
| Load->getValueType(0), SDLoc(Load), |
| Load->getChain(), Load->getBasePtr(), |
| Load->getOffset(), Load->getMemoryVT(), |
| Load->getMemOperand()); |
| // Replace uses of the EXTLOAD with the new ZEXTLOAD. |
| if (Load->getNumValues() == 3) { |
| // PRE/POST_INC loads have 3 values. |
| SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1), |
| NewLoad.getValue(2) }; |
| CombineTo(Load, To, 3, true); |
| } else { |
| CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1)); |
| } |
| } |
| |
| return SDValue(N, 0); // Return N so it doesn't get rechecked! |
| } |
| } |
| |
| // Try to convert a constant mask AND into a shuffle clear mask. |
| if (VT.isVector()) |
| if (SDValue Shuffle = XformToShuffleWithZero(N)) |
| return Shuffle; |
| |
| if (SDValue Combined = combineCarryDiamond(DAG, TLI, N0, N1, N)) |
| return Combined; |
| |
| if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR && N0.hasOneUse() && N1C && |
| ISD::isExtOpcode(N0.getOperand(0).getOpcode())) { |
| SDValue Ext = N0.getOperand(0); |
| EVT ExtVT = Ext->getValueType(0); |
| SDValue Extendee = Ext->getOperand(0); |
| |
| unsigned ScalarWidth = Extendee.getValueType().getScalarSizeInBits(); |
| if (N1C->getAPIntValue().isMask(ScalarWidth) && |
| (!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, ExtVT))) { |
| // (and (extract_subvector (zext|anyext|sext v) _) iN_mask) |
| // => (extract_subvector (iN_zeroext v)) |
| SDValue ZeroExtExtendee = |
| DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVT, Extendee); |
| |
| return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ZeroExtExtendee, |
| N0.getOperand(1)); |
| } |
| } |
| |
| // fold (and (masked_gather x)) -> (zext_masked_gather x) |
| if (auto *GN0 = dyn_cast<MaskedGatherSDNode>(N0)) { |
| EVT MemVT = GN0->getMemoryVT(); |
| EVT ScalarVT = MemVT.getScalarType(); |
| |
| if (SDValue(GN0, 0).hasOneUse() && |
| isConstantSplatVectorMaskForType(N1.getNode(), ScalarVT) && |
| TLI.isVectorLoadExtDesirable(SDValue(GN0, 0))) { |
| SDValue Ops[] = {GN0->getChain(), GN0->getPassThru(), GN0->getMask(), |
| GN0->getBasePtr(), GN0->getIndex(), GN0->getScale()}; |
| |
| SDValue ZExtLoad = DAG.getMaskedGather( |
| DAG.getVTList(VT, MVT::Other), MemVT, DL, Ops, GN0->getMemOperand(), |
| GN0->getIndexType(), ISD::ZEXTLOAD); |
| |
| CombineTo(N, ZExtLoad); |
| AddToWorklist(ZExtLoad.getNode()); |
| // Avoid recheck of N. |
| return SDValue(N, 0); |
| } |
| } |
| |
| // fold (and (load x), 255) -> (zextload x, i8) |
| // fold (and (extload x, i16), 255) -> (zextload x, i8) |
| if (N1C && N0.getOpcode() == ISD::LOAD && !VT.isVector()) |
| if (SDValue Res = reduceLoadWidth(N)) |
| return Res; |
| |
| if (LegalTypes) { |
| // Attempt to propagate the AND back up to the leaves which, if they're |
| // loads, can be combined to narrow loads and the AND node can be removed. |
| // Perform after legalization so that extend nodes will already be |
| // combined into the loads. |
| if (BackwardsPropagateMask(N)) |
| return SDValue(N, 0); |
| } |
| |
| if (SDValue Combined = visitANDLike(N0, N1, N)) |
| return Combined; |
| |
| // Simplify: (and (op x...), (op y...)) -> (op (and x, y)) |
| if (N0.getOpcode() == N1.getOpcode()) |
| if (SDValue V = hoistLogicOpWithSameOpcodeHands(N)) |
| return V; |
| |
| if (SDValue R = foldLogicOfShifts(N, N0, N1, DAG)) |
| return R; |
| if (SDValue R = foldLogicOfShifts(N, N1, N0, DAG)) |
| return R; |
| |
| // Fold (and X, (bswap (not Y))) -> (and X, (not (bswap Y))) |
| // Fold (and X, (bitreverse (not Y))) -> (and X, (not (bitreverse Y))) |
| SDValue X, Y, Z, NotY; |
| for (unsigned Opc : {ISD::BSWAP, ISD::BITREVERSE}) |
| if (sd_match(N, |
| m_And(m_Value(X), m_OneUse(m_UnaryOp(Opc, m_Value(NotY))))) && |
| sd_match(NotY, m_Not(m_Value(Y))) && |
| (TLI.hasAndNot(SDValue(N, 0)) || NotY->hasOneUse())) |
| return DAG.getNode(ISD::AND, DL, VT, X, |
| DAG.getNOT(DL, DAG.getNode(Opc, DL, VT, Y), VT)); |
| |
| // Fold (and X, (rot (not Y), Z)) -> (and X, (not (rot Y, Z))) |
| for (unsigned Opc : {ISD::ROTL, ISD::ROTR}) |
| if (sd_match(N, m_And(m_Value(X), |
| m_OneUse(m_BinOp(Opc, m_Value(NotY), m_Value(Z))))) && |
| sd_match(NotY, m_Not(m_Value(Y))) && |
| (TLI.hasAndNot(SDValue(N, 0)) || NotY->hasOneUse())) |
| return DAG.getNode(ISD::AND, DL, VT, X, |
| DAG.getNOT(DL, DAG.getNode(Opc, DL, VT, Y, Z), VT)); |
| |
| // Fold (and X, (add (not Y), Z)) -> (and X, (not (sub Y, Z))) |
| // Fold (and X, (sub (not Y), Z)) -> (and X, (not (add Y, Z))) |
| if (TLI.hasAndNot(SDValue(N, 0))) |
| if (SDValue Folded = foldBitwiseOpWithNeg(N, DL, VT)) |
| return Folded; |
| |
| // Fold (and (srl X, C), 1) -> (srl X, BW-1) for signbit extraction |
| // If we are shifting down an extended sign bit, see if we can simplify |
| // this to shifting the MSB directly to expose further simplifications. |
| // This pattern often appears after sext_inreg legalization. |
| APInt Amt; |
| if (sd_match(N, m_And(m_Srl(m_Value(X), m_ConstInt(Amt)), m_One())) && |
| Amt.ult(BitWidth - 1) && Amt.uge(BitWidth - DAG.ComputeNumSignBits(X))) |
| return DAG.getNode(ISD::SRL, DL, VT, X, |
| DAG.getShiftAmountConstant(BitWidth - 1, VT, DL)); |
| |
| // Masking the negated extension of a boolean is just the zero-extended |
| // boolean: |
| // and (sub 0, zext(bool X)), 1 --> zext(bool X) |
| // and (sub 0, sext(bool X)), 1 --> zext(bool X) |
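  // For example, with X = (zext i1 b), (sub 0, X) is either 0 or all-ones,
  // so masking with 1 recovers b itself.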
| // |
| // Note: the SimplifyDemandedBits fold below can make an information-losing |
| // transform, and then we have no way to find this better fold. |
| if (sd_match(N, m_And(m_Sub(m_Zero(), m_Value(X)), m_One()))) { |
| if (X.getOpcode() == ISD::ZERO_EXTEND && |
| X.getOperand(0).getScalarValueSizeInBits() == 1) |
| return X; |
| if (X.getOpcode() == ISD::SIGN_EXTEND && |
| X.getOperand(0).getScalarValueSizeInBits() == 1) |
| return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, X.getOperand(0)); |
| } |
| |
| // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) |
| // fold (and (sra)) -> (and (srl)) when possible. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| // fold (zext_inreg (extload x)) -> (zextload x) |
| // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use |
| if (ISD::isUNINDEXEDLoad(N0.getNode()) && |
| (ISD::isEXTLoad(N0.getNode()) || |
| (ISD::isSEXTLoad(N0.getNode()) && N0.hasOneUse()))) { |
| auto *LN0 = cast<LoadSDNode>(N0); |
| EVT MemVT = LN0->getMemoryVT(); |
| // If we zero all the possible extended bits, then we can turn this into |
| // a zextload if we are running before legalize or the operation is legal. |
| unsigned ExtBitSize = N1.getScalarValueSizeInBits(); |
| unsigned MemBitSize = MemVT.getScalarSizeInBits(); |
| APInt ExtBits = APInt::getHighBitsSet(ExtBitSize, ExtBitSize - MemBitSize); |
| if (DAG.MaskedValueIsZero(N1, ExtBits) && |
| ((!LegalOperations && LN0->isSimple()) || |
| TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) { |
| SDValue ExtLoad = |
| DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, LN0->getChain(), |
| LN0->getBasePtr(), MemVT, LN0->getMemOperand()); |
| AddToWorklist(N); |
| CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); |
| return SDValue(N, 0); // Return N so it doesn't get rechecked! |
| } |
| } |
| |
| // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const) |
| if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) { |
| if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), |
| N0.getOperand(1), false)) |
| return BSwap; |
| } |
| |
| if (SDValue Shifts = unfoldExtremeBitClearingToShifts(N)) |
| return Shifts; |
| |
| if (SDValue V = combineShiftAnd1ToBitTest(N, DAG)) |
| return V; |
| |
| // Recognize the following pattern: |
| // |
| // AndVT = (and (sign_extend NarrowVT to AndVT) #bitmask) |
| // |
  // where bitmask is a mask that clears the upper bits of AndVT, keeping
  // exactly as many low bits set as NarrowVT is wide.
| auto IsAndZeroExtMask = [](SDValue LHS, SDValue RHS) { |
| if (LHS->getOpcode() != ISD::SIGN_EXTEND) |
| return false; |
| |
| auto *C = dyn_cast<ConstantSDNode>(RHS); |
| if (!C) |
| return false; |
| |
| if (!C->getAPIntValue().isMask( |
| LHS.getOperand(0).getValueType().getFixedSizeInBits())) |
| return false; |
| |
| return true; |
| }; |
| |
| // Replace (and (sign_extend ...) #bitmask) with (zero_extend ...). |
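  // e.g. (and (sign_extend x:i8 to i32), 0xFF) -> (zero_extend x:i8 to i32).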
| if (IsAndZeroExtMask(N0, N1)) |
| return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); |
| |
| if (hasOperation(ISD::USUBSAT, VT)) |
| if (SDValue V = foldAndToUsubsat(N, DAG, DL)) |
| return V; |
| |
| // Postpone until legalization completed to avoid interference with bswap |
| // folding |
| if (LegalOperations || VT.isVector()) |
| if (SDValue R = foldLogicTreeOfShifts(N, N0, N1, DAG)) |
| return R; |
| |
| if (VT.isScalarInteger() && VT != MVT::i1) |
| if (SDValue R = foldMaskedMerge(N, DAG, TLI, DL)) |
| return R; |
| |
| return SDValue(); |
| } |
| |
| /// Match (a >> 8) | (a << 8) as (bswap a) >> 16. |
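/// For example, for i32 this turns ((x & 0xff) << 8) | ((x >> 8) & 0xff)
/// into (srl (bswap x), 16), leaving the swapped halfword in the low bits.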
| SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1, |
| bool DemandHighBits) { |
| if (!LegalOperations) |
| return SDValue(); |
| |
| EVT VT = N->getValueType(0); |
| if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16) |
| return SDValue(); |
| if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT)) |
| return SDValue(); |
| |
| // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff) |
| bool LookPassAnd0 = false; |
| bool LookPassAnd1 = false; |
| if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL) |
| std::swap(N0, N1); |
| if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL) |
| std::swap(N0, N1); |
| if (N0.getOpcode() == ISD::AND) { |
| if (!N0->hasOneUse()) |
| return SDValue(); |
| ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| // Also handle 0xffff since the LHS is guaranteed to have zeros there. |
| // This is needed for X86. |
| if (!N01C || (N01C->getZExtValue() != 0xFF00 && |
| N01C->getZExtValue() != 0xFFFF)) |
| return SDValue(); |
| N0 = N0.getOperand(0); |
| LookPassAnd0 = true; |
| } |
| |
| if (N1.getOpcode() == ISD::AND) { |
| if (!N1->hasOneUse()) |
| return SDValue(); |
| ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
| if (!N11C || N11C->getZExtValue() != 0xFF) |
| return SDValue(); |
| N1 = N1.getOperand(0); |
| LookPassAnd1 = true; |
| } |
| |
| if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) |
| std::swap(N0, N1); |
| if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) |
| return SDValue(); |
| if (!N0->hasOneUse() || !N1->hasOneUse()) |
| return SDValue(); |
| |
| ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); |
| if (!N01C || !N11C) |
| return SDValue(); |
| if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8) |
| return SDValue(); |
| |
| // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8) |
| SDValue N00 = N0->getOperand(0); |
| if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) { |
| if (!N00->hasOneUse()) |
| return SDValue(); |
| ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1)); |
| if (!N001C || N001C->getZExtValue() != 0xFF) |
| return SDValue(); |
| N00 = N00.getOperand(0); |
| LookPassAnd0 = true; |
| } |
| |
| SDValue N10 = N1->getOperand(0); |
| if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) { |
| if (!N10->hasOneUse()) |
| return SDValue(); |
| ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1)); |
| // Also allow 0xFFFF since the bits will be shifted out. This is needed |
| // for X86. |
| if (!N101C || (N101C->getZExtValue() != 0xFF00 && |
| N101C->getZExtValue() != 0xFFFF)) |
| return SDValue(); |
| N10 = N10.getOperand(0); |
| LookPassAnd1 = true; |
| } |
| |
| if (N00 != N10) |
| return SDValue(); |
| |
  // Make sure everything beyond the low halfword gets set to zero since the
  // SRL 16 will clear the top bits.
| unsigned OpSizeInBits = VT.getSizeInBits(); |
| if (OpSizeInBits > 16) { |
| // If the left-shift isn't masked out then the only way this is a bswap is |
| // if all bits beyond the low 8 are 0. In that case the entire pattern |
| // reduces to a left shift anyway: leave it for other parts of the combiner. |
| if (DemandHighBits && !LookPassAnd0) |
| return SDValue(); |
| |
| // However, if the right shift isn't masked out then it might be because |
| // it's not needed. See if we can spot that too. If the high bits aren't |
| // demanded, we only need bits 23:16 to be zero. Otherwise, we need all |
| // upper bits to be zero. |
| if (!LookPassAnd1) { |
| unsigned HighBit = DemandHighBits ? OpSizeInBits : 24; |
| if (!DAG.MaskedValueIsZero(N10, |
| APInt::getBitsSet(OpSizeInBits, 16, HighBit))) |
| return SDValue(); |
| } |
| } |
| |
| SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00); |
| if (OpSizeInBits > 16) { |
| SDLoc DL(N); |
| Res = DAG.getNode(ISD::SRL, DL, VT, Res, |
| DAG.getShiftAmountConstant(OpSizeInBits - 16, VT, DL)); |
| } |
| return Res; |
| } |
| |
| /// Return true if the specified node is an element that makes up a 32-bit |
| /// packed halfword byteswap. |
| /// ((x & 0x000000ff) << 8) | |
| /// ((x & 0x0000ff00) >> 8) | |
| /// ((x & 0x00ff0000) << 8) | |
| /// ((x & 0xff000000) >> 8) |
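/// i.e. the four elements together swap the two bytes within each 16-bit
/// half of x in place.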
| static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) { |
| if (!N->hasOneUse()) |
| return false; |
| |
| unsigned Opc = N.getOpcode(); |
| if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL) |
| return false; |
| |
| SDValue N0 = N.getOperand(0); |
| unsigned Opc0 = N0.getOpcode(); |
| if (Opc0 != ISD::AND && Opc0 != ISD::SHL && Opc0 != ISD::SRL) |
| return false; |
| |
| ConstantSDNode *N1C = nullptr; |
| // SHL or SRL: look upstream for AND mask operand |
| if (Opc == ISD::AND) |
| N1C = dyn_cast<ConstantSDNode>(N.getOperand(1)); |
| else if (Opc0 == ISD::AND) |
| N1C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| if (!N1C) |
| return false; |
| |
| unsigned MaskByteOffset; |
| switch (N1C->getZExtValue()) { |
| default: |
| return false; |
| case 0xFF: MaskByteOffset = 0; break; |
| case 0xFF00: MaskByteOffset = 1; break; |
| case 0xFFFF: |
| // In case demanded bits didn't clear the bits that will be shifted out. |
| // This is needed for X86. |
| if (Opc == ISD::SRL || (Opc == ISD::AND && Opc0 == ISD::SHL)) { |
| MaskByteOffset = 1; |
| break; |
| } |
| return false; |
| case 0xFF0000: MaskByteOffset = 2; break; |
| case 0xFF000000: MaskByteOffset = 3; break; |
| } |
| |
| // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00). |
| if (Opc == ISD::AND) { |
| if (MaskByteOffset == 0 || MaskByteOffset == 2) { |
| // (x >> 8) & 0xff |
| // (x >> 8) & 0xff0000 |
| if (Opc0 != ISD::SRL) |
| return false; |
| ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| if (!C || C->getZExtValue() != 8) |
| return false; |
| } else { |
| // (x << 8) & 0xff00 |
| // (x << 8) & 0xff000000 |
| if (Opc0 != ISD::SHL) |
| return false; |
| ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| if (!C || C->getZExtValue() != 8) |
| return false; |
| } |
| } else if (Opc == ISD::SHL) { |
| // (x & 0xff) << 8 |
| // (x & 0xff0000) << 8 |
| if (MaskByteOffset != 0 && MaskByteOffset != 2) |
| return false; |
| ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); |
| if (!C || C->getZExtValue() != 8) |
| return false; |
| } else { // Opc == ISD::SRL |
| // (x & 0xff00) >> 8 |
| // (x & 0xff000000) >> 8 |
| if (MaskByteOffset != 1 && MaskByteOffset != 3) |
| return false; |
| ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); |
| if (!C || C->getZExtValue() != 8) |
| return false; |
| } |
| |
| if (Parts[MaskByteOffset]) |
| return false; |
| |
| Parts[MaskByteOffset] = N0.getOperand(0).getNode(); |
| return true; |
| } |
| |
| // Match 2 elements of a packed halfword bswap. |
| static bool isBSwapHWordPair(SDValue N, MutableArrayRef<SDNode *> Parts) { |
| if (N.getOpcode() == ISD::OR) |
| return isBSwapHWordElement(N.getOperand(0), Parts) && |
| isBSwapHWordElement(N.getOperand(1), Parts); |
| |
| if (N.getOpcode() == ISD::SRL && N.getOperand(0).getOpcode() == ISD::BSWAP) { |
| ConstantSDNode *C = isConstOrConstSplat(N.getOperand(1)); |
| if (!C || C->getAPIntValue() != 16) |
| return false; |
| Parts[0] = Parts[1] = N.getOperand(0).getOperand(0).getNode(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| // Match this pattern: |
// (or (and (shl A, 8), 0xff00ff00), (and (srl A, 8), 0x00ff00ff))
| // And rewrite this to: |
| // (rotr (bswap A), 16) |
| static SDValue matchBSwapHWordOrAndAnd(const TargetLowering &TLI, |
| SelectionDAG &DAG, SDNode *N, SDValue N0, |
| SDValue N1, EVT VT) { |
| assert(N->getOpcode() == ISD::OR && VT == MVT::i32 && |
| "MatchBSwapHWordOrAndAnd: expecting i32"); |
| if (!TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) |
| return SDValue(); |
| if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND) |
| return SDValue(); |
| // TODO: this is too restrictive; lifting this restriction requires more tests |
| if (!N0->hasOneUse() || !N1->hasOneUse()) |
| return SDValue(); |
| ConstantSDNode *Mask0 = isConstOrConstSplat(N0.getOperand(1)); |
| ConstantSDNode *Mask1 = isConstOrConstSplat(N1.getOperand(1)); |
| if (!Mask0 || !Mask1) |
| return SDValue(); |
| if (Mask0->getAPIntValue() != 0xff00ff00 || |
| Mask1->getAPIntValue() != 0x00ff00ff) |
| return SDValue(); |
| SDValue Shift0 = N0.getOperand(0); |
| SDValue Shift1 = N1.getOperand(0); |
| if (Shift0.getOpcode() != ISD::SHL || Shift1.getOpcode() != ISD::SRL) |
| return SDValue(); |
| ConstantSDNode *ShiftAmt0 = isConstOrConstSplat(Shift0.getOperand(1)); |
| ConstantSDNode *ShiftAmt1 = isConstOrConstSplat(Shift1.getOperand(1)); |
| if (!ShiftAmt0 || !ShiftAmt1) |
| return SDValue(); |
| if (ShiftAmt0->getAPIntValue() != 8 || ShiftAmt1->getAPIntValue() != 8) |
| return SDValue(); |
| if (Shift0.getOperand(0) != Shift1.getOperand(0)) |
| return SDValue(); |
| |
| SDLoc DL(N); |
| SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Shift0.getOperand(0)); |
| SDValue ShAmt = DAG.getShiftAmountConstant(16, VT, DL); |
| return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt); |
| } |
| |
| /// Match a 32-bit packed halfword bswap. That is |
| /// ((x & 0x000000ff) << 8) | |
| /// ((x & 0x0000ff00) >> 8) | |
| /// ((x & 0x00ff0000) << 8) | |
| /// ((x & 0xff000000) >> 8) |
| /// => (rotl (bswap x), 16) |
| SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) { |
| if (!LegalOperations) |
| return SDValue(); |
| |
| EVT VT = N->getValueType(0); |
| if (VT != MVT::i32) |
| return SDValue(); |
| if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT)) |
| return SDValue(); |
| |
| if (SDValue BSwap = matchBSwapHWordOrAndAnd(TLI, DAG, N, N0, N1, VT)) |
| return BSwap; |
| |
| // Try again with commuted operands. |
| if (SDValue BSwap = matchBSwapHWordOrAndAnd(TLI, DAG, N, N1, N0, VT)) |
    return BSwap;

| // Look for either |
| // (or (bswaphpair), (bswaphpair)) |
| // (or (or (bswaphpair), (and)), (and)) |
| // (or (or (and), (bswaphpair)), (and)) |
| SDNode *Parts[4] = {}; |
| |
| if (isBSwapHWordPair(N0, Parts)) { |
| // (or (or (and), (and)), (or (and), (and))) |
| if (!isBSwapHWordPair(N1, Parts)) |
| return SDValue(); |
| } else if (N0.getOpcode() == ISD::OR) { |
| // (or (or (or (and), (and)), (and)), (and)) |
| if (!isBSwapHWordElement(N1, Parts)) |
| return SDValue(); |
| SDValue N00 = N0.getOperand(0); |
| SDValue N01 = N0.getOperand(1); |
| if (!(isBSwapHWordElement(N01, Parts) && isBSwapHWordPair(N00, Parts)) && |
| !(isBSwapHWordElement(N00, Parts) && isBSwapHWordPair(N01, Parts))) |
| return SDValue(); |
| } else { |
| return SDValue(); |
| } |
| |
| // Make sure the parts are all coming from the same node. |
| if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3]) |
| return SDValue(); |
| |
| SDLoc DL(N); |
| SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, |
| SDValue(Parts[0], 0)); |
| |
| // Result of the bswap should be rotated by 16. If it's not legal, then |
| // do (x << 16) | (x >> 16). |
| SDValue ShAmt = DAG.getShiftAmountConstant(16, VT, DL); |
| if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT)) |
| return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt); |
| if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) |
| return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt); |
| return DAG.getNode(ISD::OR, DL, VT, |
| DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt), |
| DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt)); |
| } |
| |
| /// This contains all DAGCombine rules which reduce two values combined by |
| /// an Or operation to a single value \see visitANDLike(). |
| SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, const SDLoc &DL) { |
| EVT VT = N1.getValueType(); |
| |
| // fold (or x, undef) -> -1 |
| if (!LegalOperations && (N0.isUndef() || N1.isUndef())) |
| return DAG.getAllOnesConstant(DL, VT); |
| |
| if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL)) |
| return V; |
| |
| // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible. |
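  // E.g. with C1 == 0xFF00 and C2 == 0x00FF this produces
  // (and (or X, Y), 0xFFFF), provided the low byte of X and byte 1 of Y are
  // known zero.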
| if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND && |
| // Don't increase # computations. |
| (N0->hasOneUse() || N1->hasOneUse())) { |
| // We can only do this xform if we know that bits from X that are set in C2 |
| // but not in C1 are already zero. Likewise for Y. |
| if (const ConstantSDNode *N0O1C = |
| getAsNonOpaqueConstant(N0.getOperand(1))) { |
| if (const ConstantSDNode *N1O1C = |
| getAsNonOpaqueConstant(N1.getOperand(1))) { |
| // We can only do this xform if we know that bits from X that are set in |
| // C2 but not in C1 are already zero. Likewise for Y. |
| const APInt &LHSMask = N0O1C->getAPIntValue(); |
| const APInt &RHSMask = N1O1C->getAPIntValue(); |
| |
| if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) && |
| DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) { |
| SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT, |
| N0.getOperand(0), N1.getOperand(0)); |
| return DAG.getNode(ISD::AND, DL, VT, X, |
| DAG.getConstant(LHSMask | RHSMask, DL, VT)); |
| } |
| } |
| } |
| } |
| |
| // (or (and X, M), (and X, N)) -> (and X, (or M, N)) |
| if (N0.getOpcode() == ISD::AND && |
| N1.getOpcode() == ISD::AND && |
| N0.getOperand(0) == N1.getOperand(0) && |
| // Don't increase # computations. |
| (N0->hasOneUse() || N1->hasOneUse())) { |
| SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT, |
| N0.getOperand(1), N1.getOperand(1)); |
| return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), X); |
| } |
| |
| return SDValue(); |
| } |
| |
| /// OR combines for which the commuted variant will be tried as well. |
| static SDValue visitORCommutative(SelectionDAG &DAG, SDValue N0, SDValue N1, |
| SDNode *N) { |
| EVT VT = N0.getValueType(); |
| unsigned BW = VT.getScalarSizeInBits(); |
| SDLoc DL(N); |
| |
| auto peekThroughResize = [](SDValue V) { |
| if (V->getOpcode() == ISD::ZERO_EXTEND || V->getOpcode() == ISD::TRUNCATE) |
| return V->getOperand(0); |
| return V; |
| }; |
| |
| SDValue N0Resized = peekThroughResize(N0); |
| if (N0Resized.getOpcode() == ISD::AND) { |
| SDValue N1Resized = peekThroughResize(N1); |
| SDValue N00 = N0Resized.getOperand(0); |
| SDValue N01 = N0Resized.getOperand(1); |
| |
| // fold or (and x, y), x --> x |
| if (N00 == N1Resized || N01 == N1Resized) |
| return N1; |
| |
| // fold (or (and X, (xor Y, -1)), Y) -> (or X, Y) |
| // TODO: Set AllowUndefs = true. |
| if (SDValue NotOperand = getBitwiseNotOperand(N01, N00, |
| /* AllowUndefs */ false)) { |
| if (peekThroughResize(NotOperand) == N1Resized) |
| return DAG.getNode(ISD::OR, DL, VT, DAG.getZExtOrTrunc(N00, DL, VT), |
| N1); |
| } |
| |
| // fold (or (and (xor Y, -1), X), Y) -> (or X, Y) |
| if (SDValue NotOperand = getBitwiseNotOperand(N00, N01, |
| /* AllowUndefs */ false)) { |
| if (peekThroughResize(NotOperand) == N1Resized) |
| return DAG.getNode(ISD::OR, DL, VT, DAG.getZExtOrTrunc(N01, DL, VT), |
| N1); |
| } |
| } |
| |
| SDValue X, Y; |
| |
| // fold or (xor X, N1), N1 --> or X, N1 |
| if (sd_match(N0, m_Xor(m_Value(X), m_Specific(N1)))) |
| return DAG.getNode(ISD::OR, DL, VT, X, N1); |
| |
| // fold or (xor x, y), (x and/or y) --> or x, y |
| if (sd_match(N0, m_Xor(m_Value(X), m_Value(Y))) && |
| (sd_match(N1, m_And(m_Specific(X), m_Specific(Y))) || |
| sd_match(N1, m_Or(m_Specific(X), m_Specific(Y))))) |
| return DAG.getNode(ISD::OR, DL, VT, X, Y); |
| |
| if (SDValue R = foldLogicOfShifts(N, N0, N1, DAG)) |
| return R; |
| |
| auto peekThroughZext = [](SDValue V) { |
| if (V->getOpcode() == ISD::ZERO_EXTEND) |
| return V->getOperand(0); |
| return V; |
| }; |
| |
| // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y |
| if (N0.getOpcode() == ISD::FSHL && N1.getOpcode() == ISD::SHL && |
| N0.getOperand(0) == N1.getOperand(0) && |
| peekThroughZext(N0.getOperand(2)) == peekThroughZext(N1.getOperand(1))) |
| return N0; |
| |
| // (fshr ?, X, Y) | (srl X, Y) --> fshr ?, X, Y |
| if (N0.getOpcode() == ISD::FSHR && N1.getOpcode() == ISD::SRL && |
| N0.getOperand(1) == N1.getOperand(0) && |
| peekThroughZext(N0.getOperand(2)) == peekThroughZext(N1.getOperand(1))) |
| return N0; |
| |
| // Attempt to match a legalized build_pair-esque pattern: |
| // or(shl(aext(Hi),BW/2),zext(Lo)) |
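  // e.g. for i64: or (shl (anyext Hi:i32), 32), (zext Lo:i32).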
| SDValue Lo, Hi; |
| if (sd_match(N0, |
| m_OneUse(m_Shl(m_AnyExt(m_Value(Hi)), m_SpecificInt(BW / 2)))) && |
| sd_match(N1, m_ZExt(m_Value(Lo))) && |
| Lo.getScalarValueSizeInBits() == (BW / 2) && |
| Lo.getValueType() == Hi.getValueType()) { |
| // Fold build_pair(not(Lo),not(Hi)) -> not(build_pair(Lo,Hi)). |
| SDValue NotLo, NotHi; |
| if (sd_match(Lo, m_OneUse(m_Not(m_Value(NotLo)))) && |
| sd_match(Hi, m_OneUse(m_Not(m_Value(NotHi))))) { |
| Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NotLo); |
| Hi = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NotHi); |
| Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, |
| DAG.getShiftAmountConstant(BW / 2, VT, DL)); |
| return DAG.getNOT(DL, DAG.getNode(ISD::OR, DL, VT, Lo, Hi), VT); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitOR(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N1.getValueType(); |
| SDLoc DL(N); |
| |
| // x | x --> x |
| if (N0 == N1) |
| return N0; |
| |
| // fold (or c1, c2) -> c1|c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::OR, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::OR, DL, VT, N1, N0); |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (or x, 0) -> x, vector edition |
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return N0; |
| |
| // fold (or x, -1) -> -1, vector edition |
| if (ISD::isConstantSplatVectorAllOnes(N1.getNode())) |
      // do not return N1, because an undef node may exist in N1
| return DAG.getAllOnesConstant(DL, N1.getValueType()); |
| |
| // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask) |
| // Do this only if the resulting type / shuffle is legal. |
| auto *SV0 = dyn_cast<ShuffleVectorSDNode>(N0); |
| auto *SV1 = dyn_cast<ShuffleVectorSDNode>(N1); |
| if (SV0 && SV1 && TLI.isTypeLegal(VT)) { |
| bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode()); |
| bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()); |
| bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode()); |
| bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode()); |
| // Ensure both shuffles have a zero input. |
| if ((ZeroN00 != ZeroN01) && (ZeroN10 != ZeroN11)) { |
| assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!"); |
| assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!"); |
| bool CanFold = true; |
| int NumElts = VT.getVectorNumElements(); |
| SmallVector<int, 4> Mask(NumElts, -1); |
| |
| for (int i = 0; i != NumElts; ++i) { |
| int M0 = SV0->getMaskElt(i); |
| int M1 = SV1->getMaskElt(i); |
| |
| // Determine if either index is pointing to a zero vector. |
| bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts)); |
| bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts)); |
| |
          // If one element is zero and the other side is undef, keep undef.
| // This also handles the case that both are undef. |
| if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) |
| continue; |
| |
| // Make sure only one of the elements is zero. |
| if (M0Zero == M1Zero) { |
| CanFold = false; |
| break; |
| } |
| |
| assert((M0 >= 0 || M1 >= 0) && "Undef index!"); |
| |
| // We have a zero and non-zero element. If the non-zero came from |
| // SV0 make the index a LHS index. If it came from SV1, make it |
| // a RHS index. We need to mod by NumElts because we don't care |
| // which operand it came from in the original shuffles. |
| Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts; |
| } |
| |
| if (CanFold) { |
| SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0); |
| SDValue NewRHS = ZeroN10 ? N1.getOperand(1) : N1.getOperand(0); |
| SDValue LegalShuffle = |
| TLI.buildLegalVectorShuffle(VT, DL, NewLHS, NewRHS, Mask, DAG); |
| if (LegalShuffle) |
| return LegalShuffle; |
| } |
| } |
| } |
| } |
| |
| // fold (or x, 0) -> x |
| if (isNullConstant(N1)) |
| return N0; |
| |
| // fold (or x, -1) -> -1 |
| if (isAllOnesConstant(N1)) |
| return N1; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // fold (or x, c) -> c iff (x & ~c) == 0 |
| ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); |
| if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue())) |
| return N1; |
| |
| if (SDValue R = foldAndOrOfSETCC(N, DAG)) |
| return R; |
| |
| if (SDValue Combined = visitORLike(N0, N1, DL)) |
| return Combined; |
| |
| if (SDValue Combined = combineCarryDiamond(DAG, TLI, N0, N1, N)) |
| return Combined; |
| |
| // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16) |
| if (SDValue BSwap = MatchBSwapHWord(N, N0, N1)) |
| return BSwap; |
| if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1)) |
| return BSwap; |
| |
| // reassociate or |
| if (SDValue ROR = reassociateOps(ISD::OR, DL, N0, N1, N->getFlags())) |
| return ROR; |
| |
| // Fold or(vecreduce(x), vecreduce(y)) -> vecreduce(or(x, y)) |
| if (SDValue SD = |
| reassociateReduction(ISD::VECREDUCE_OR, ISD::OR, DL, VT, N0, N1)) |
| return SD; |
| |
| // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2) |
| // iff (c1 & c2) != 0 or c1/c2 are undef. |
| auto MatchIntersect = [](ConstantSDNode *C1, ConstantSDNode *C2) { |
| return !C1 || !C2 || C1->getAPIntValue().intersects(C2->getAPIntValue()); |
| }; |
| if (N0.getOpcode() == ISD::AND && N0->hasOneUse() && |
| ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect, true)) { |
| if (SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT, |
| {N1, N0.getOperand(1)})) { |
| SDValue IOR = DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1); |
| AddToWorklist(IOR.getNode()); |
| return DAG.getNode(ISD::AND, DL, VT, COR, IOR); |
| } |
| } |
| |
| if (SDValue Combined = visitORCommutative(DAG, N0, N1, N)) |
| return Combined; |
| if (SDValue Combined = visitORCommutative(DAG, N1, N0, N)) |
| return Combined; |
| |
| // Simplify: (or (op x...), (op y...)) -> (op (or x, y)) |
| if (N0.getOpcode() == N1.getOpcode()) |
| if (SDValue V = hoistLogicOpWithSameOpcodeHands(N)) |
| return V; |
| |
| // See if this is some rotate idiom. |
| if (SDValue Rot = MatchRotate(N0, N1, DL, /*FromAdd=*/false)) |
| return Rot; |
| |
| if (SDValue Load = MatchLoadCombine(N)) |
| return Load; |
| |
| // Simplify the operands using demanded-bits information. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| // If OR can be rewritten into ADD, try combines based on ADD. |
| if ((!LegalOperations || TLI.isOperationLegal(ISD::ADD, VT)) && |
| DAG.isADDLike(SDValue(N, 0))) |
| if (SDValue Combined = visitADDLike(N)) |
| return Combined; |
| |
| // Postpone until legalization completed to avoid interference with bswap |
| // folding |
| if (LegalOperations || VT.isVector()) |
| if (SDValue R = foldLogicTreeOfShifts(N, N0, N1, DAG)) |
| return R; |
| |
| if (VT.isScalarInteger() && VT != MVT::i1) |
| if (SDValue R = foldMaskedMerge(N, DAG, TLI, DL)) |
| return R; |
| |
| return SDValue(); |
| } |
| |
| static SDValue stripConstantMask(const SelectionDAG &DAG, SDValue Op, |
| SDValue &Mask) { |
| if (Op.getOpcode() == ISD::AND && |
| DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) { |
| Mask = Op.getOperand(1); |
| return Op.getOperand(0); |
| } |
| return Op; |
| } |
| |
| /// Match "(X shl/srl V1) & V2" where V2 may not be present. |
| static bool matchRotateHalf(const SelectionDAG &DAG, SDValue Op, SDValue &Shift, |
| SDValue &Mask) { |
| Op = stripConstantMask(DAG, Op, Mask); |
| if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) { |
| Shift = Op; |
| return true; |
| } |
| return false; |
| } |
| |
| /// Helper function for visitOR to extract the needed side of a rotate idiom |
| /// from a shl/srl/mul/udiv. This is meant to handle cases where |
| /// InstCombine merged some outside op with one of the shifts from |
| /// the rotate pattern. |
| /// \returns An empty \c SDValue if the needed shift couldn't be extracted. |
| /// Otherwise, returns an expansion of \p ExtractFrom based on the following |
| /// patterns: |
| /// |
/// (or (add v v) (srl v bitwidth-1)):
///   expands (add v v) -> (shl v 1)
///
/// (or (mul v c0) (srl (mul v c1) c2)):
///   expands (mul v c0) -> (shl (mul v c1) c3)
///
/// (or (udiv v c0) (shl (udiv v c1) c2)):
///   expands (udiv v c0) -> (srl (udiv v c1) c3)
///
/// (or (shl v c0) (srl (shl v c1) c2)):
///   expands (shl v c0) -> (shl (shl v c1) c3)
///
/// (or (srl v c0) (shl (srl v c1) c2)):
///   expands (srl v c0) -> (srl (srl v c1) c3)
| /// |
| /// Such that in all cases, c3+c2==bitwidth(op v c1). |
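///
/// For example, for a 32-bit v, (or (mul v c0) (srl (mul v c1) c2)) with
/// c0 == 24, c1 == 3 and c2 == 29 expands (mul v 24) to (shl (mul v 3) 3),
/// since 24 == 3 << 3 and 3 + 29 == 32.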
| static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift, |
| SDValue ExtractFrom, SDValue &Mask, |
| const SDLoc &DL) { |
| assert(OppShift && ExtractFrom && "Empty SDValue"); |
| if (OppShift.getOpcode() != ISD::SHL && OppShift.getOpcode() != ISD::SRL) |
| return SDValue(); |
| |
| ExtractFrom = stripConstantMask(DAG, ExtractFrom, Mask); |
| |
| // Value and Type of the shift. |
| SDValue OppShiftLHS = OppShift.getOperand(0); |
| EVT ShiftedVT = OppShiftLHS.getValueType(); |
| |
| // Amount of the existing shift. |
| ConstantSDNode *OppShiftCst = isConstOrConstSplat(OppShift.getOperand(1)); |
| |
| // (add v v) -> (shl v 1) |
| // TODO: Should this be a general DAG canonicalization? |
| if (OppShift.getOpcode() == ISD::SRL && OppShiftCst && |
| ExtractFrom.getOpcode() == ISD::ADD && |
| ExtractFrom.getOperand(0) == ExtractFrom.getOperand(1) && |
| ExtractFrom.getOperand(0) == OppShiftLHS && |
| OppShiftCst->getAPIntValue() == ShiftedVT.getScalarSizeInBits() - 1) |
| return DAG.getNode(ISD::SHL, DL, ShiftedVT, OppShiftLHS, |
| DAG.getShiftAmountConstant(1, ShiftedVT, DL)); |
| |
| // Preconditions: |
| // (or (op0 v c0) (shiftl/r (op0 v c1) c2)) |
| // |
| // Find opcode of the needed shift to be extracted from (op0 v c0). |
| unsigned Opcode = ISD::DELETED_NODE; |
| bool IsMulOrDiv = false; |
| // Set Opcode and IsMulOrDiv if the extract opcode matches the needed shift |
| // opcode or its arithmetic (mul or udiv) variant. |
| auto SelectOpcode = [&](unsigned NeededShift, unsigned MulOrDivVariant) { |
| IsMulOrDiv = ExtractFrom.getOpcode() == MulOrDivVariant; |
| if (!IsMulOrDiv && ExtractFrom.getOpcode() != NeededShift) |
| return false; |
| Opcode = NeededShift; |
| return true; |
| }; |
| // op0 must be either the needed shift opcode or the mul/udiv equivalent |
| // that the needed shift can be extracted from. |
| if ((OppShift.getOpcode() != ISD::SRL || !SelectOpcode(ISD::SHL, ISD::MUL)) && |
| (OppShift.getOpcode() != ISD::SHL || !SelectOpcode(ISD::SRL, ISD::UDIV))) |
| return SDValue(); |
| |
| // op0 must be the same opcode on both sides, have the same LHS argument, |
| // and produce the same value type. |
| if (OppShiftLHS.getOpcode() != ExtractFrom.getOpcode() || |
| OppShiftLHS.getOperand(0) != ExtractFrom.getOperand(0) || |
| ShiftedVT != ExtractFrom.getValueType()) |
| return SDValue(); |
| |
| // Constant mul/udiv/shift amount from the RHS of the shift's LHS op. |
| ConstantSDNode *OppLHSCst = isConstOrConstSplat(OppShiftLHS.getOperand(1)); |
| // Constant mul/udiv/shift amount from the RHS of the ExtractFrom op. |
| ConstantSDNode *ExtractFromCst = |
| isConstOrConstSplat(ExtractFrom.getOperand(1)); |
  // TODO: We should be able to handle non-uniform constant vectors for these
  // values.
| // Check that we have constant values. |
| if (!OppShiftCst || !OppShiftCst->getAPIntValue() || |
| !OppLHSCst || !OppLHSCst->getAPIntValue() || |
| !ExtractFromCst || !ExtractFromCst->getAPIntValue()) |
| return SDValue(); |
| |
| // Compute the shift amount we need to extract to complete the rotate. |
| const unsigned VTWidth = ShiftedVT.getScalarSizeInBits(); |
| if (OppShiftCst->getAPIntValue().ugt(VTWidth)) |
| return SDValue(); |
| APInt NeededShiftAmt = VTWidth - OppShiftCst->getAPIntValue(); |
| // Normalize the bitwidth of the two mul/udiv/shift constant operands. |
| APInt ExtractFromAmt = ExtractFromCst->getAPIntValue(); |
| APInt OppLHSAmt = OppLHSCst->getAPIntValue(); |
| zeroExtendToMatch(ExtractFromAmt, OppLHSAmt); |
| |
| // Now try extract the needed shift from the ExtractFrom op and see if the |
| // result matches up with the existing shift's LHS op. |
| if (IsMulOrDiv) { |
| // Op to extract from is a mul or udiv by a constant. |
| // Check: |
| // c2 / (1 << (bitwidth(op0 v c0) - c1)) == c0 |
| // c2 % (1 << (bitwidth(op0 v c0) - c1)) == 0 |
| const APInt ExtractDiv = APInt::getOneBitSet(ExtractFromAmt.getBitWidth(), |
| NeededShiftAmt.getZExtValue()); |
| APInt ResultAmt; |
| APInt Rem; |
| APInt::udivrem(ExtractFromAmt, ExtractDiv, ResultAmt, Rem); |
| if (Rem != 0 || ResultAmt != OppLHSAmt) |
| return SDValue(); |
| } else { |
| // Op to extract from is a shift by a constant. |
| // Check: |
| // c2 - (bitwidth(op0 v c0) - c1) == c0 |
| if (OppLHSAmt != ExtractFromAmt - NeededShiftAmt.zextOrTrunc( |
| ExtractFromAmt.getBitWidth())) |
| return SDValue(); |
| } |
| |
| // Return the expanded shift op that should allow a rotate to be formed. |
| EVT ShiftVT = OppShift.getOperand(1).getValueType(); |
| EVT ResVT = ExtractFrom.getValueType(); |
| SDValue NewShiftNode = DAG.getConstant(NeededShiftAmt, DL, ShiftVT); |
| return DAG.getNode(Opcode, DL, ResVT, OppShiftLHS, NewShiftNode); |
| } |
| |
| // Return true if we can prove that, whenever Neg and Pos are both in the |
| // range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos). This means that |
| // for two opposing shifts shift1 and shift2 and a value X with OpBits bits: |
| // |
| // (or (shift1 X, Neg), (shift2 X, Pos)) |
| // |
| // reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate |
| // in direction shift1 by Neg. The range [0, EltSize) means that we only need |
| // to consider shift amounts with defined behavior. |
| // |
| // The IsRotate flag should be set when the LHS of both shifts is the same. |
| // Otherwise if matching a general funnel shift, it should be clear. |
| static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize, |
| SelectionDAG &DAG, bool IsRotate, bool FromAdd) { |
| const auto &TLI = DAG.getTargetLoweringInfo(); |
| // If EltSize is a power of 2 then: |
| // |
| // (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1) |
| // (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize). |
| // |
| // So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check |
| // for the stronger condition: |
| // |
| // Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1) [A] |
| // |
| // for all Neg and Pos. Since Neg & (EltSize - 1) == Neg' & (EltSize - 1) |
| // we can just replace Neg with Neg' for the rest of the function. |
| // |
| // In other cases we check for the even stronger condition: |
| // |
| // Neg == EltSize - Pos [B] |
| // |
| // for all Neg and Pos. Note that the (or ...) then invokes undefined |
| // behavior if Pos == 0 (and consequently Neg == EltSize). |
| // |
| // We could actually use [A] whenever EltSize is a power of 2, but the |
| // only extra cases that it would match are those uninteresting ones |
| // where Neg and Pos are never in range at the same time. E.g. for |
| // EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos) |
| // as well as (sub 32, Pos), but: |
| // |
| // (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos)) |
| // |
| // always invokes undefined behavior for 32-bit X. |
| // |
| // Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise. |
| // This allows us to peek through any operations that only affect Mask's |
| // un-demanded bits. |
| // |
| // NOTE: We can only do this when matching operations which won't modify the |
| // least Log2(EltSize) significant bits and not a general funnel shift. |
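  // For example, for EltSize == 32 we accept Neg == (sub 32, Pos) directly,
  // and also Neg == (and (sub 32, Pos), 31) via [A] with Mask == 31.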
| unsigned MaskLoBits = 0; |
| if (IsRotate && !FromAdd && isPowerOf2_64(EltSize)) { |
| unsigned Bits = Log2_64(EltSize); |
| unsigned NegBits = Neg.getScalarValueSizeInBits(); |
| if (NegBits >= Bits) { |
| APInt DemandedBits = APInt::getLowBitsSet(NegBits, Bits); |
| if (SDValue Inner = |
| TLI.SimplifyMultipleUseDemandedBits(Neg, DemandedBits, DAG)) { |
| Neg = Inner; |
| MaskLoBits = Bits; |
| } |
| } |
| } |
| |
| // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1. |
| if (Neg.getOpcode() != ISD::SUB) |
| return false; |
| ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0)); |
| if (!NegC) |
| return false; |
| SDValue NegOp1 = Neg.getOperand(1); |
| |
| // On the RHS of [A], if Pos is the result of operation on Pos' that won't |
| // affect Mask's demanded bits, just replace Pos with Pos'. These operations |
| // are redundant for the purpose of the equality. |
| if (MaskLoBits) { |
| unsigned PosBits = Pos.getScalarValueSizeInBits(); |
| if (PosBits >= MaskLoBits) { |
| APInt DemandedBits = APInt::getLowBitsSet(PosBits, MaskLoBits); |
| if (SDValue Inner = |
| TLI.SimplifyMultipleUseDemandedBits(Pos, DemandedBits, DAG)) { |
| Pos = Inner; |
| } |
| } |
| } |
| |
| // The condition we need is now: |
| // |
| // (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask |
| // |
| // If NegOp1 == Pos then we need: |
| // |
| // EltSize & Mask == NegC & Mask |
| // |
| // (because "x & Mask" is a truncation and distributes through subtraction). |
| // |
| // We also need to account for a potential truncation of NegOp1 if the amount |
| // has already been legalized to a shift amount type. |
| APInt Width; |
| if ((Pos == NegOp1) || |
| (NegOp1.getOpcode() == ISD::TRUNCATE && Pos == NegOp1.getOperand(0))) |
| Width = NegC->getAPIntValue(); |
| |
| // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC. |
| // Then the condition we want to prove becomes: |
| // |
| // (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask |
| // |
| // which, again because "x & Mask" is a truncation, becomes: |
| // |
| // NegC & Mask == (EltSize - PosC) & Mask |
| // EltSize & Mask == (NegC + PosC) & Mask |
| else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) { |
| if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) |
| Width = PosC->getAPIntValue() + NegC->getAPIntValue(); |
| else |
| return false; |
| } else |
| return false; |
| |
| // Now we just need to check that EltSize & Mask == Width & Mask. |
| if (MaskLoBits) |
| // EltSize & Mask is 0 since Mask is EltSize - 1. |
| return Width.getLoBits(MaskLoBits) == 0; |
| return Width == EltSize; |
| } |
| |
// A subroutine of MatchRotate used once we have found an OR/ADD of two
// opposite shifts of Shifted. If Neg == <operand size> - Pos then the OR/ADD
// reduces to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg),
// with the former being preferred if supported. InnerPos and InnerNeg are
// Pos and Neg with outer conversions stripped away.
| SDValue DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos, |
| SDValue Neg, SDValue InnerPos, |
| SDValue InnerNeg, bool FromAdd, |
| bool HasPos, unsigned PosOpcode, |
| unsigned NegOpcode, const SDLoc &DL) { |
| // fold (or/add (shl x, (*ext y)), |
| // (srl x, (*ext (sub 32, y)))) -> |
| // (rotl x, y) or (rotr x, (sub 32, y)) |
| // |
| // fold (or/add (shl x, (*ext (sub 32, y))), |
| // (srl x, (*ext y))) -> |
| // (rotr x, y) or (rotl x, (sub 32, y)) |
| EVT VT = Shifted.getValueType(); |
| if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits(), DAG, |
| /*IsRotate*/ true, FromAdd)) |
| return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted, |
| HasPos ? Pos : Neg); |
| |
| return SDValue(); |
| } |
| |
// A subroutine of MatchRotate used once we have found an OR/ADD of two
// opposite shifts of N0 + N1. If Neg == <operand size> - Pos then the OR/ADD
// reduces to both (PosOpcode N0, N1, Pos) and (NegOpcode N0, N1, Neg), with
// the former being preferred if supported. InnerPos and InnerNeg are Pos and
// Neg with outer conversions stripped away.
| // TODO: Merge with MatchRotatePosNeg. |
| SDValue DAGCombiner::MatchFunnelPosNeg(SDValue N0, SDValue N1, SDValue Pos, |
| SDValue Neg, SDValue InnerPos, |
| SDValue InnerNeg, bool FromAdd, |
| bool HasPos, unsigned PosOpcode, |
| unsigned NegOpcode, const SDLoc &DL) { |
| EVT VT = N0.getValueType(); |
| unsigned EltBits = VT.getScalarSizeInBits(); |
| |
| // fold (or/add (shl x0, (*ext y)), |
| // (srl x1, (*ext (sub 32, y)))) -> |
| // (fshl x0, x1, y) or (fshr x0, x1, (sub 32, y)) |
| // |
| // fold (or/add (shl x0, (*ext (sub 32, y))), |
| // (srl x1, (*ext y))) -> |
| // (fshr x0, x1, y) or (fshl x0, x1, (sub 32, y)) |
| if (matchRotateSub(InnerPos, InnerNeg, EltBits, DAG, /*IsRotate*/ N0 == N1, |
| FromAdd)) |
| return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, N0, N1, |
| HasPos ? Pos : Neg); |
| |
| // Matching the shift+xor cases, we can't easily use the xor'd shift amount |
| // so for now just use the PosOpcode case if its legal. |
| // TODO: When can we use the NegOpcode case? |
| if (PosOpcode == ISD::FSHL && isPowerOf2_32(EltBits)) { |
| SDValue X; |
| // fold (or/add (shl x0, y), (srl (srl x1, 1), (xor y, 31))) |
| // -> (fshl x0, x1, y) |
| if (sd_match(N1, m_Srl(m_Value(X), m_One())) && |
| sd_match(InnerNeg, |
| m_Xor(m_Specific(InnerPos), m_SpecificInt(EltBits - 1))) && |
| TLI.isOperationLegalOrCustom(ISD::FSHL, VT)) { |
| return DAG.getNode(ISD::FSHL, DL, VT, N0, X, Pos); |
| } |
| |
| // fold (or/add (shl (shl x0, 1), (xor y, 31)), (srl x1, y)) |
| // -> (fshr x0, x1, y) |
| if (sd_match(N0, m_Shl(m_Value(X), m_One())) && |
| sd_match(InnerPos, |
| m_Xor(m_Specific(InnerNeg), m_SpecificInt(EltBits - 1))) && |
| TLI.isOperationLegalOrCustom(ISD::FSHR, VT)) { |
| return DAG.getNode(ISD::FSHR, DL, VT, X, N1, Neg); |
| } |
| |
| // fold (or/add (shl (add x0, x0), (xor y, 31)), (srl x1, y)) |
| // -> (fshr x0, x1, y) |
| // TODO: Should add(x,x) -> shl(x,1) be a general DAG canonicalization? |
| if (sd_match(N0, m_Add(m_Value(X), m_Deferred(X))) && |
| sd_match(InnerPos, |
| m_Xor(m_Specific(InnerNeg), m_SpecificInt(EltBits - 1))) && |
| TLI.isOperationLegalOrCustom(ISD::FSHR, VT)) { |
| return DAG.getNode(ISD::FSHR, DL, VT, X, N1, Neg); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| // MatchRotate - Handle an 'or' or 'add' of two operands. If this is one of the |
| // many idioms for rotate, and if the target supports rotation instructions, |
| // generate a rot[lr]. This also matches funnel shift patterns, similar to |
| // rotation but with different shifted sources. |
| SDValue DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL, |
| bool FromAdd) { |
| EVT VT = LHS.getValueType(); |
| |
| // The target must have at least one rotate/funnel flavor. |
| // We still try to match rotate by constant pre-legalization. |
| // TODO: Support pre-legalization funnel-shift by constant. |
| bool HasROTL = hasOperation(ISD::ROTL, VT); |
| bool HasROTR = hasOperation(ISD::ROTR, VT); |
| bool HasFSHL = hasOperation(ISD::FSHL, VT); |
| bool HasFSHR = hasOperation(ISD::FSHR, VT); |
| |
| // If the type is going to be promoted and the target has enabled custom |
| // lowering for rotate, allow matching rotate by non-constants. Only allow |
| // this for scalar types. |
| if (VT.isScalarInteger() && TLI.getTypeAction(*DAG.getContext(), VT) == |
| TargetLowering::TypePromoteInteger) { |
| HasROTL |= TLI.getOperationAction(ISD::ROTL, VT) == TargetLowering::Custom; |
| HasROTR |= TLI.getOperationAction(ISD::ROTR, VT) == TargetLowering::Custom; |
| } |
| |
| if (LegalOperations && !HasROTL && !HasROTR && !HasFSHL && !HasFSHR) |
| return SDValue(); |
| |
| // Check for truncated rotate. |
| if (LHS.getOpcode() == ISD::TRUNCATE && RHS.getOpcode() == ISD::TRUNCATE && |
| LHS.getOperand(0).getValueType() == RHS.getOperand(0).getValueType()) { |
| assert(LHS.getValueType() == RHS.getValueType()); |
| if (SDValue Rot = |
| MatchRotate(LHS.getOperand(0), RHS.getOperand(0), DL, FromAdd)) |
| return DAG.getNode(ISD::TRUNCATE, SDLoc(LHS), LHS.getValueType(), Rot); |
| } |
| |
| // Match "(X shl/srl V1) & V2" where V2 may not be present. |
| SDValue LHSShift; // The shift. |
| SDValue LHSMask; // AND value if any. |
| matchRotateHalf(DAG, LHS, LHSShift, LHSMask); |
| |
| SDValue RHSShift; // The shift. |
| SDValue RHSMask; // AND value if any. |
| matchRotateHalf(DAG, RHS, RHSShift, RHSMask); |
| |
| // If neither side matched a rotate half, bail |
| if (!LHSShift && !RHSShift) |
| return SDValue(); |
| |
| // InstCombine may have combined a constant shl, srl, mul, or udiv with one |
| // side of the rotate, so try to handle that here. In all cases we need to |
| // pass the matched shift from the opposite side to compute the opcode and |
| // needed shift amount to extract. We still want to do this if both sides |
| // matched a rotate half because one half may be a potential overshift that |
  // can be broken down (i.e. if InstCombine merged two shl or srl ops into a
| // single one). |
| |
| // Have LHS side of the rotate, try to extract the needed shift from the RHS. |
| if (LHSShift) |
| if (SDValue NewRHSShift = |
| extractShiftForRotate(DAG, LHSShift, RHS, RHSMask, DL)) |
| RHSShift = NewRHSShift; |
| // Have RHS side of the rotate, try to extract the needed shift from the LHS. |
| if (RHSShift) |
| if (SDValue NewLHSShift = |
| extractShiftForRotate(DAG, RHSShift, LHS, LHSMask, DL)) |
| LHSShift = NewLHSShift; |
| |
| // If a side is still missing, nothing else we can do. |
| if (!RHSShift || !LHSShift) |
| return SDValue(); |
| |
| // At this point we've matched or extracted a shift op on each side. |
| |
| if (LHSShift.getOpcode() == RHSShift.getOpcode()) |
| return SDValue(); // Shifts must disagree. |
| |
| // Canonicalize shl to left side in a shl/srl pair. |
| if (RHSShift.getOpcode() == ISD::SHL) { |
| std::swap(LHS, RHS); |
| std::swap(LHSShift, RHSShift); |
| std::swap(LHSMask, RHSMask); |
| } |
| |
| // Something has gone wrong - we've lost the shl/srl pair - bail. |
| if (LHSShift.getOpcode() != ISD::SHL || RHSShift.getOpcode() != ISD::SRL) |
| return SDValue(); |
| |
| unsigned EltSizeInBits = VT.getScalarSizeInBits(); |
| SDValue LHSShiftArg = LHSShift.getOperand(0); |
| SDValue LHSShiftAmt = LHSShift.getOperand(1); |
| SDValue RHSShiftArg = RHSShift.getOperand(0); |
| SDValue RHSShiftAmt = RHSShift.getOperand(1); |
| |
| auto MatchRotateSum = [EltSizeInBits](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| return (LHS->getAPIntValue() + RHS->getAPIntValue()) == EltSizeInBits; |
| }; |
| |
| auto ApplyMasks = [&](SDValue Res) { |
| // If there is an AND of either shifted operand, apply it to the result. |
| if (LHSMask.getNode() || RHSMask.getNode()) { |
| SDValue AllOnes = DAG.getAllOnesConstant(DL, VT); |
| SDValue Mask = AllOnes; |
| |
| if (LHSMask.getNode()) { |
| SDValue RHSBits = DAG.getNode(ISD::SRL, DL, VT, AllOnes, RHSShiftAmt); |
| Mask = DAG.getNode(ISD::AND, DL, VT, Mask, |
| DAG.getNode(ISD::OR, DL, VT, LHSMask, RHSBits)); |
| } |
| if (RHSMask.getNode()) { |
| SDValue LHSBits = DAG.getNode(ISD::SHL, DL, VT, AllOnes, LHSShiftAmt); |
| Mask = DAG.getNode(ISD::AND, DL, VT, Mask, |
| DAG.getNode(ISD::OR, DL, VT, RHSMask, LHSBits)); |
| } |
| |
| Res = DAG.getNode(ISD::AND, DL, VT, Res, Mask); |
| } |
| |
| return Res; |
| }; |
| |
| // TODO: Support pre-legalization funnel-shift by constant. |
| bool IsRotate = LHSShiftArg == RHSShiftArg; |
| if (!IsRotate && !(HasFSHL || HasFSHR)) { |
| if (TLI.isTypeLegal(VT) && LHS.hasOneUse() && RHS.hasOneUse() && |
| ISD::matchBinaryPredicate(LHSShiftAmt, RHSShiftAmt, MatchRotateSum)) { |
| // Look for a disguised rotate by constant. |
| // The common shifted operand X may be hidden inside another 'or'. |
| SDValue X, Y; |
| auto matchOr = [&X, &Y](SDValue Or, SDValue CommonOp) { |
| if (!Or.hasOneUse() || Or.getOpcode() != ISD::OR) |
| return false; |
| if (CommonOp == Or.getOperand(0)) { |
| X = CommonOp; |
| Y = Or.getOperand(1); |
| return true; |
| } |
| if (CommonOp == Or.getOperand(1)) { |
| X = CommonOp; |
| Y = Or.getOperand(0); |
| return true; |
| } |
| return false; |
| }; |
| |
| SDValue Res; |
| if (matchOr(LHSShiftArg, RHSShiftArg)) { |
| // (shl (X | Y), C1) | (srl X, C2) --> (rotl X, C1) | (shl Y, C1) |
| SDValue RotX = DAG.getNode(ISD::ROTL, DL, VT, X, LHSShiftAmt); |
| SDValue ShlY = DAG.getNode(ISD::SHL, DL, VT, Y, LHSShiftAmt); |
| Res = DAG.getNode(ISD::OR, DL, VT, RotX, ShlY); |
| } else if (matchOr(RHSShiftArg, LHSShiftArg)) { |
| // (shl X, C1) | (srl (X | Y), C2) --> (rotl X, C1) | (srl Y, C2) |
| SDValue RotX = DAG.getNode(ISD::ROTL, DL, VT, X, LHSShiftAmt); |
| SDValue SrlY = DAG.getNode(ISD::SRL, DL, VT, Y, RHSShiftAmt); |
| Res = DAG.getNode(ISD::OR, DL, VT, RotX, SrlY); |
| } else { |
| return SDValue(); |
| } |
| |
| return ApplyMasks(Res); |
| } |
| |
| return SDValue(); // Requires funnel shift support. |
| } |
| |
| // fold (or/add (shl x, C1), (srl x, C2)) -> (rotl x, C1) |
| // fold (or/add (shl x, C1), (srl x, C2)) -> (rotr x, C2) |
| // fold (or/add (shl x, C1), (srl y, C2)) -> (fshl x, y, C1) |
| // fold (or/add (shl x, C1), (srl y, C2)) -> (fshr x, y, C2) |
| // iff C1+C2 == EltSizeInBits |
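  // e.g. for i32: (or (shl x, 8), (srl x, 24)) -> (rotl x, 8) or (rotr x, 24)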
| if (ISD::matchBinaryPredicate(LHSShiftAmt, RHSShiftAmt, MatchRotateSum)) { |
| SDValue Res; |
| if (IsRotate && (HasROTL || HasROTR || !(HasFSHL || HasFSHR))) { |
| bool UseROTL = !LegalOperations || HasROTL; |
| Res = DAG.getNode(UseROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg, |
| UseROTL ? LHSShiftAmt : RHSShiftAmt); |
| } else { |
| bool UseFSHL = !LegalOperations || HasFSHL; |
| Res = DAG.getNode(UseFSHL ? ISD::FSHL : ISD::FSHR, DL, VT, LHSShiftArg, |
| RHSShiftArg, UseFSHL ? LHSShiftAmt : RHSShiftAmt); |
| } |
| |
| return ApplyMasks(Res); |
| } |
| |
| // Even pre-legalization, we can't easily rotate/funnel-shift by a variable |
| // shift. |
| if (!HasROTL && !HasROTR && !HasFSHL && !HasFSHR) |
| return SDValue(); |
| |
| // If there is a mask here, and we have a variable shift, we can't be sure |
| // that we're masking out the right stuff. |
| if (LHSMask.getNode() || RHSMask.getNode()) |
| return SDValue(); |
| |
| // If the shift amount is sign/zext/any-extended just peel it off. |
| SDValue LExtOp0 = LHSShiftAmt; |
| SDValue RExtOp0 = RHSShiftAmt; |
| if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || |
| LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || |
| LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || |
| LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && |
| (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || |
| RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || |
| RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || |
| RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { |
| LExtOp0 = LHSShiftAmt.getOperand(0); |
| RExtOp0 = RHSShiftAmt.getOperand(0); |
| } |
| |
| if (IsRotate && (HasROTL || HasROTR)) { |
| if (SDValue TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt, |
| LExtOp0, RExtOp0, FromAdd, HasROTL, |
| ISD::ROTL, ISD::ROTR, DL)) |
| return TryL; |
| |
| if (SDValue TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt, |
| RExtOp0, LExtOp0, FromAdd, HasROTR, |
| ISD::ROTR, ISD::ROTL, DL)) |
| return TryR; |
| } |
| |
| if (SDValue TryL = MatchFunnelPosNeg(LHSShiftArg, RHSShiftArg, LHSShiftAmt, |
| RHSShiftAmt, LExtOp0, RExtOp0, FromAdd, |
| HasFSHL, ISD::FSHL, ISD::FSHR, DL)) |
| return TryL; |
| |
| if (SDValue TryR = MatchFunnelPosNeg(LHSShiftArg, RHSShiftArg, RHSShiftAmt, |
| LHSShiftAmt, RExtOp0, LExtOp0, FromAdd, |
| HasFSHR, ISD::FSHR, ISD::FSHL, DL)) |
| return TryR; |
| |
| return SDValue(); |
| } |
| |
/// Recursively traverses the expression, calculating the origin of the
/// requested byte of the given value. Returns std::nullopt if the provider
/// can't be calculated.
| /// |
| /// For all the values except the root of the expression, we verify that the |
| /// value has exactly one use and if not then return std::nullopt. This way if |
| /// the origin of the byte is returned it's guaranteed that the values which |
| /// contribute to the byte are not used outside of this expression. |
///
| /// However, there is a special case when dealing with vector loads -- we allow |
| /// more than one use if the load is a vector type. Since the values that |
| /// contribute to the byte ultimately come from the ExtractVectorElements of the |
| /// Load, we don't care if the Load has uses other than ExtractVectorElements, |
| /// because those operations are independent from the pattern to be combined. |
| /// For vector loads, we simply care that the ByteProviders are adjacent |
| /// positions of the same vector, and their index matches the byte that is being |
| /// provided. This is captured by the \p VectorIndex algorithm. \p VectorIndex |
| /// is the index used in an ExtractVectorElement, and \p StartingIndex is the |
| /// byte position we are trying to provide for the LoadCombine. If these do |
/// not match, then we cannot combine the vector loads. \p Index tracks the
/// byte position we are trying to provide and is matched against the
/// shl and load size. The \p Index algorithm ensures the requested byte is
/// provided for by the pattern, and that the pattern does not over-provide
/// bytes.
| /// |
| /// |
| /// The supported LoadCombine pattern for vector loads is as follows |
| /// or |
| /// / \ |
| /// or shl |
| /// / \ | |
| /// or shl zext |
| /// / \ | | |
| /// shl zext zext EVE* |
| /// | | | | |
| /// zext EVE* EVE* LOAD |
| /// | | | |
| /// EVE* LOAD LOAD |
| /// | |
| /// LOAD |
| /// |
| /// *ExtractVectorElement |
| using SDByteProvider = ByteProvider<SDNode *>; |
| |
| static std::optional<SDByteProvider> |
| calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth, |
| std::optional<uint64_t> VectorIndex, |
| unsigned StartingIndex = 0) { |
| |
  // A typical i64-by-i8 pattern requires recursion up to a depth of 8 calls.
| if (Depth == 10) |
| return std::nullopt; |
| |
| // Only allow multiple uses if the instruction is a vector load (in which |
| // case we will use the load for every ExtractVectorElement) |
| if (Depth && !Op.hasOneUse() && |
| (Op.getOpcode() != ISD::LOAD || !Op.getValueType().isVector())) |
| return std::nullopt; |
| |
| // Fail to combine if we have encountered anything but a LOAD after handling |
| // an ExtractVectorElement. |
| if (Op.getOpcode() != ISD::LOAD && VectorIndex.has_value()) |
| return std::nullopt; |
| |
| unsigned BitWidth = Op.getValueSizeInBits(); |
| if (BitWidth % 8 != 0) |
| return std::nullopt; |
| unsigned ByteWidth = BitWidth / 8; |
| assert(Index < ByteWidth && "invalid index requested"); |
| (void) ByteWidth; |
| |
| switch (Op.getOpcode()) { |
| case ISD::OR: { |
| auto LHS = |
| calculateByteProvider(Op->getOperand(0), Index, Depth + 1, VectorIndex); |
| if (!LHS) |
| return std::nullopt; |
| auto RHS = |
| calculateByteProvider(Op->getOperand(1), Index, Depth + 1, VectorIndex); |
| if (!RHS) |
| return std::nullopt; |
| |
| if (LHS->isConstantZero()) |
| return RHS; |
| if (RHS->isConstantZero()) |
| return LHS; |
| return std::nullopt; |
| } |
| case ISD::SHL: { |
| auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1)); |
| if (!ShiftOp) |
| return std::nullopt; |
| |
| uint64_t BitShift = ShiftOp->getZExtValue(); |
| |
| if (BitShift % 8 != 0) |
| return std::nullopt; |
| uint64_t ByteShift = BitShift / 8; |
| |
    // If we are shifting by an amount greater than the index we are trying to
    // provide, then do not provide anything. Otherwise, subtract the shift
    // amount from the index.
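    // E.g. for (shl x, 16), ByteShift is 2: requesting byte 1 yields constant
    // zero, while requesting byte 3 recurses into byte 1 of x.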
| return Index < ByteShift |
| ? SDByteProvider::getConstantZero() |
| : calculateByteProvider(Op->getOperand(0), Index - ByteShift, |
| Depth + 1, VectorIndex, Index); |
| } |
| case ISD::ANY_EXTEND: |
| case ISD::SIGN_EXTEND: |
| case ISD::ZERO_EXTEND: { |
| SDValue NarrowOp = Op->getOperand(0); |
| unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits(); |
| if (NarrowBitWidth % 8 != 0) |
| return std::nullopt; |
| uint64_t NarrowByteWidth = NarrowBitWidth / 8; |
| |
| if (Index >= NarrowByteWidth) |
| return Op.getOpcode() == ISD::ZERO_EXTEND |
| ? std::optional<SDByteProvider>( |
| SDByteProvider::getConstantZero()) |
| : std::nullopt; |
| return calculateByteProvider(NarrowOp, Index, Depth + 1, VectorIndex, |
| StartingIndex); |
| } |
| case ISD::BSWAP: |
| return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1, |
| Depth + 1, VectorIndex, StartingIndex); |
| case ISD::EXTRACT_VECTOR_ELT: { |
| auto OffsetOp = dyn_cast<ConstantSDNode>(Op->getOperand(1)); |
| if (!OffsetOp) |
| return std::nullopt; |
| |
| VectorIndex = OffsetOp->getZExtValue(); |
| |
| SDValue NarrowOp = Op->getOperand(0); |
| unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits(); |
| if (NarrowBitWidth % 8 != 0) |
| return std::nullopt; |
| uint64_t NarrowByteWidth = NarrowBitWidth / 8; |
| // EXTRACT_VECTOR_ELT can extend the element type to the width of the return |
| // type, leaving the high bits undefined. |
| if (Index >= NarrowByteWidth) |
| return std::nullopt; |
| |
    // Check to see if the position of the element in the vector corresponds
    // with the byte we are trying to provide for. In the case of a vector of
    // i8, this simply means VectorIndex == StartingIndex. For non-i8 cases,
    // the element will provide a range of bytes. For example, if we have a
    // vector of i16s, each element provides two bytes (V[1] provides bytes 2
    // and 3).
| if (*VectorIndex * NarrowByteWidth > StartingIndex) |
| return std::nullopt; |
| if ((*VectorIndex + 1) * NarrowByteWidth <= StartingIndex) |
| return std::nullopt; |
| |
| return calculateByteProvider(Op->getOperand(0), Index, Depth + 1, |
| VectorIndex, StartingIndex); |
| } |
| case ISD::LOAD: { |
| auto L = cast<LoadSDNode>(Op.getNode()); |
| if (!L->isSimple() || L->isIndexed()) |
| return std::nullopt; |
| |
| unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits(); |
| if (NarrowBitWidth % 8 != 0) |
| return std::nullopt; |
| uint64_t NarrowByteWidth = NarrowBitWidth / 8; |
| |
    // If the width of the load does not reach the byte we are trying to
    // provide for, and it is not a ZEXTLOAD, then the load does not provide
    // for the byte in question.
| if (Index >= NarrowByteWidth) |
| return L->getExtensionType() == ISD::ZEXTLOAD |
| ? std::optional<SDByteProvider>( |
| SDByteProvider::getConstantZero()) |
| : std::nullopt; |
| |
| unsigned BPVectorIndex = VectorIndex.value_or(0U); |
| return SDByteProvider::getSrc(L, Index, BPVectorIndex); |
| } |
| } |
| |
| return std::nullopt; |
| } |
| |
| static unsigned littleEndianByteAt(unsigned BW, unsigned i) { |
| return i; |
| } |
| |
| static unsigned bigEndianByteAt(unsigned BW, unsigned i) { |
| return BW - i - 1; |
| } |
| |
// Check if the byte offsets we are looking at match either a big- or
// little-endian value load. Return true for big endian, false for little
// endian, and std::nullopt if the match failed.
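// E.g. with FirstOffset F, offsets {F, F+1, F+2, F+3} match the little-endian
// layout (returns false), while {F+3, F+2, F+1, F} match the big-endian layout
// (returns true).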
| static std::optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets, |
| int64_t FirstOffset) { |
  // Endianness can only be determined when there are at least 2 bytes.
| unsigned Width = ByteOffsets.size(); |
| if (Width < 2) |
| return std::nullopt; |
| |
| bool BigEndian = true, LittleEndian = true; |
| for (unsigned i = 0; i < Width; i++) { |
| int64_t CurrentByteOffset = ByteOffsets[i] - FirstOffset; |
| LittleEndian &= CurrentByteOffset == littleEndianByteAt(Width, i); |
| BigEndian &= CurrentByteOffset == bigEndianByteAt(Width, i); |
| if (!BigEndian && !LittleEndian) |
| return std::nullopt; |
| } |
| |
  assert((BigEndian != LittleEndian) && "It should be either big endian or "
                                        "little endian");
| return BigEndian; |
| } |
| |
| // Look through one layer of truncate or extend. |
| static SDValue stripTruncAndExt(SDValue Value) { |
| switch (Value.getOpcode()) { |
| case ISD::TRUNCATE: |
| case ISD::ZERO_EXTEND: |
| case ISD::SIGN_EXTEND: |
| case ISD::ANY_EXTEND: |
| return Value.getOperand(0); |
| } |
| return SDValue(); |
| } |
| |
/// Match a pattern where a wide type scalar value is stored by several narrow
/// stores. Fold it into a single store or a BSWAP and a store if the target
/// supports it.
| /// |
| /// Assuming little endian target: |
| /// i8 *p = ... |
| /// i32 val = ... |
| /// p[0] = (val >> 0) & 0xFF; |
| /// p[1] = (val >> 8) & 0xFF; |
| /// p[2] = (val >> 16) & 0xFF; |
| /// p[3] = (val >> 24) & 0xFF; |
| /// => |
| /// *((i32)p) = val; |
| /// |
| /// i8 *p = ... |
| /// i32 val = ... |
| /// p[0] = (val >> 24) & 0xFF; |
| /// p[1] = (val >> 16) & 0xFF; |
| /// p[2] = (val >> 8) & 0xFF; |
| /// p[3] = (val >> 0) & 0xFF; |
| /// => |
| /// *((i32)p) = BSWAP(val); |
| SDValue DAGCombiner::mergeTruncStores(StoreSDNode *N) { |
| // The matching looks for "store (trunc x)" patterns that appear early but are |
| // likely to be replaced by truncating store nodes during combining. |
| // TODO: If there is evidence that running this later would help, this |
| // limitation could be removed. Legality checks may need to be added |
| // for the created store and optional bswap/rotate. |
| if (LegalOperations || OptLevel == CodeGenOptLevel::None) |
| return SDValue(); |
| |
| // We only handle merging simple stores of 1-4 bytes. |
| // TODO: Allow unordered atomics when wider type is legal (see D66309) |
| EVT MemVT = N->getMemoryVT(); |
| if (!(MemVT == MVT::i8 || MemVT == MVT::i16 || MemVT == MVT::i32) || |
| !N->isSimple() || N->isIndexed()) |
| return SDValue(); |
| |
  // Collect all of the stores in the chain, up to the maximum store width
  // (i64).
| SDValue Chain = N->getChain(); |
| SmallVector<StoreSDNode *, 8> Stores = {N}; |
| unsigned NarrowNumBits = MemVT.getScalarSizeInBits(); |
| unsigned MaxWideNumBits = 64; |
| unsigned MaxStores = MaxWideNumBits / NarrowNumBits; |
| while (auto *Store = dyn_cast<StoreSDNode>(Chain)) { |
| // All stores must be the same size to ensure that we are writing all of the |
| // bytes in the wide value. |
| // This store should have exactly one use as a chain operand for another |
| // store in the merging set. If there are other chain uses, then the |
| // transform may not be safe because order of loads/stores outside of this |
| // set may not be preserved. |
| // TODO: We could allow multiple sizes by tracking each stored byte. |
| if (Store->getMemoryVT() != MemVT || !Store->isSimple() || |
| Store->isIndexed() || !Store->hasOneUse()) |
| return SDValue(); |
| Stores.push_back(Store); |
| Chain = Store->getChain(); |
| if (MaxStores < Stores.size()) |
| return SDValue(); |
| } |
| // There is no reason to continue if we do not have at least a pair of stores. |
| if (Stores.size() < 2) |
| return SDValue(); |
| |
| // Handle simple types only. |
| LLVMContext &Context = *DAG.getContext(); |
| unsigned NumStores = Stores.size(); |
| unsigned WideNumBits = NumStores * NarrowNumBits; |
| EVT WideVT = EVT::getIntegerVT(Context, WideNumBits); |
| if (WideVT != MVT::i16 && WideVT != MVT::i32 && WideVT != MVT::i64) |
| return SDValue(); |
| |
| // Check if all bytes of the source value that we are looking at are stored |
| // to the same base address. Collect offsets from Base address into OffsetMap. |
| SDValue SourceValue; |
| SmallVector<int64_t, 8> OffsetMap(NumStores, INT64_MAX); |
| int64_t FirstOffset = INT64_MAX; |
| StoreSDNode *FirstStore = nullptr; |
| std::optional<BaseIndexOffset> Base; |
| for (auto *Store : Stores) { |
| // All the stores store different parts of the CombinedValue. A truncate is |
| // required to get the partial value. |
| SDValue Trunc = Store->getValue(); |
| if (Trunc.getOpcode() != ISD::TRUNCATE) |
| return SDValue(); |
| // Other than the first/last part, a shift operation is required to get the |
| // offset. |
| int64_t Offset = 0; |
| SDValue WideVal = Trunc.getOperand(0); |
| if ((WideVal.getOpcode() == ISD::SRL || WideVal.getOpcode() == ISD::SRA) && |
| isa<ConstantSDNode>(WideVal.getOperand(1))) { |
| // The shift amount must be a constant multiple of the narrow type. |
| // It is translated to the offset address in the wide source value "y". |
| // |
| // x = srl y, ShiftAmtC |
| // i8 z = trunc x |
| // store z, ... |
| uint64_t ShiftAmtC = WideVal.getConstantOperandVal(1); |
| if (ShiftAmtC % NarrowNumBits != 0) |
| return SDValue(); |
| |
| // Make sure we aren't reading bits that are shifted in. |
| if (ShiftAmtC > WideVal.getScalarValueSizeInBits() - NarrowNumBits) |
| return SDValue(); |
| |
| Offset = ShiftAmtC / NarrowNumBits; |
| WideVal = WideVal.getOperand(0); |
| } |
| |
| // Stores must share the same source value with different offsets. |
| if (!SourceValue) |
| SourceValue = WideVal; |
| else if (SourceValue != WideVal) { |
| // Truncate and extends can be stripped to see if the values are related. |
| if (stripTruncAndExt(SourceValue) != WideVal && |
| stripTruncAndExt(WideVal) != SourceValue) |
| return SDValue(); |
| |
| if (WideVal.getScalarValueSizeInBits() > |
| SourceValue.getScalarValueSizeInBits()) |
| SourceValue = WideVal; |
| |
| // Give up if the source value type is smaller than the store size. |
| if (SourceValue.getScalarValueSizeInBits() < WideVT.getScalarSizeInBits()) |
| return SDValue(); |
| } |
| |
| // Stores must share the same base address. |
| BaseIndexOffset Ptr = BaseIndexOffset::match(Store, DAG); |
| int64_t ByteOffsetFromBase = 0; |
| if (!Base) |
| Base = Ptr; |
| else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase)) |
| return SDValue(); |
| |
| // Remember the first store. |
| if (ByteOffsetFromBase < FirstOffset) { |
| FirstStore = Store; |
| FirstOffset = ByteOffsetFromBase; |
| } |
    // Map the offset in the store to the offset in the combined value, and
    // early return if this offset has already been set.
| if (Offset < 0 || Offset >= NumStores || OffsetMap[Offset] != INT64_MAX) |
| return SDValue(); |
| OffsetMap[Offset] = ByteOffsetFromBase; |
| } |
| |
| assert(FirstOffset != INT64_MAX && "First byte offset must be set"); |
| assert(FirstStore && "First store must be set"); |
| |
| // Check that a store of the wide type is both allowed and fast on the target |
| const DataLayout &Layout = DAG.getDataLayout(); |
| unsigned Fast = 0; |
| bool Allowed = TLI.allowsMemoryAccess(Context, Layout, WideVT, |
| *FirstStore->getMemOperand(), &Fast); |
| if (!Allowed || !Fast) |
| return SDValue(); |
| |
| // Check if the pieces of the value are going to the expected places in memory |
| // to merge the stores. |
| auto checkOffsets = [&](bool MatchLittleEndian) { |
| if (MatchLittleEndian) { |
| for (unsigned i = 0; i != NumStores; ++i) |
| if (OffsetMap[i] != i * (NarrowNumBits / 8) + FirstOffset) |
| return false; |
| } else { // MatchBigEndian by reversing loop counter. |
| for (unsigned i = 0, j = NumStores - 1; i != NumStores; ++i, --j) |
| if (OffsetMap[j] != i * (NarrowNumBits / 8) + FirstOffset) |
| return false; |
| } |
| return true; |
| }; |
| |
| // Check if the offsets line up for the native data layout of this target. |
| bool NeedBswap = false; |
| bool NeedRotate = false; |
| if (!checkOffsets(Layout.isLittleEndian())) { |
| // Special-case: check if byte offsets line up for the opposite endian. |
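    // E.g. on a little-endian target, two i16 stores that place the high half
    // of an i32 at offset 0 and the low half at offset 2 can be merged into a
    // single i32 store of ROTR(val, 16).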
| if (NarrowNumBits == 8 && checkOffsets(Layout.isBigEndian())) |
| NeedBswap = true; |
| else if (NumStores == 2 && checkOffsets(Layout.isBigEndian())) |
| NeedRotate = true; |
| else |
| return SDValue(); |
| } |
| |
| SDLoc DL(N); |
| if (WideVT != SourceValue.getValueType()) { |
| assert(SourceValue.getValueType().getScalarSizeInBits() > WideNumBits && |
| "Unexpected store value to merge"); |
| SourceValue = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SourceValue); |
| } |
| |
| // Before legalize we can introduce illegal bswaps/rotates which will be later |
| // converted to an explicit bswap sequence. This way we end up with a single |
| // store and byte shuffling instead of several stores and byte shuffling. |
| if (NeedBswap) { |
| SourceValue = DAG.getNode(ISD::BSWAP, DL, WideVT, SourceValue); |
| } else if (NeedRotate) { |
| assert(WideNumBits % 2 == 0 && "Unexpected type for rotate"); |
| SDValue RotAmt = DAG.getConstant(WideNumBits / 2, DL, WideVT); |
| SourceValue = DAG.getNode(ISD::ROTR, DL, WideVT, SourceValue, RotAmt); |
| } |
| |
| SDValue NewStore = |
| DAG.getStore(Chain, DL, SourceValue, FirstStore->getBasePtr(), |
| FirstStore->getPointerInfo(), FirstStore->getAlign()); |
| |
| // Rely on other DAG combine rules to remove the other individual stores. |
| DAG.ReplaceAllUsesWith(N, NewStore.getNode()); |
| return NewStore; |
| } |
| |
/// Match a pattern where a wide type scalar value is loaded by several narrow
/// loads and combined by shifts and ors. Fold it into a single load or a load
/// and a BSWAP if the target supports it.
| /// |
| /// Assuming little endian target: |
| /// i8 *a = ... |
| /// i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24) |
| /// => |
| /// i32 val = *((i32)a) |
| /// |
| /// i8 *a = ... |
| /// i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3] |
| /// => |
| /// i32 val = BSWAP(*((i32)a)) |
| /// |
/// TODO: This rule matches complex patterns with OR node roots and doesn't
/// interact well with the worklist mechanism. When a part of the pattern is
/// updated (e.g. one of the loads), its direct users are put into the
/// worklist, but the root node of the pattern which triggers the load combine
/// is not necessarily a direct user of the changed node. For example, once the
/// address of the t28 load is reassociated, load combine won't be triggered:
| /// t25: i32 = add t4, Constant:i32<2> |
| /// t26: i64 = sign_extend t25 |
| /// t27: i64 = add t2, t26 |
| /// t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64 |
| /// t29: i32 = zero_extend t28 |
| /// t32: i32 = shl t29, Constant:i8<8> |
| /// t33: i32 = or t23, t32 |
| /// As a possible fix visitLoad can check if the load can be a part of a load |
| /// combine pattern and add corresponding OR roots to the worklist. |
| SDValue DAGCombiner::MatchLoadCombine(SDNode *N) { |
| assert(N->getOpcode() == ISD::OR && |
| "Can only match load combining against OR nodes"); |
| |
| // Handles simple types only |
| EVT VT = N->getValueType(0); |
| if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) |
| return SDValue(); |
| unsigned ByteWidth = VT.getSizeInBits() / 8; |
| |
| bool IsBigEndianTarget = DAG.getDataLayout().isBigEndian(); |
| auto MemoryByteOffset = [&](SDByteProvider P) { |
| assert(P.hasSrc() && "Must be a memory byte provider"); |
| auto *Load = cast<LoadSDNode>(P.Src.value()); |
| |
| unsigned LoadBitWidth = Load->getMemoryVT().getScalarSizeInBits(); |
| |
    assert(LoadBitWidth % 8 == 0 &&
           "can only analyze providers for individual bytes, not bits");
| unsigned LoadByteWidth = LoadBitWidth / 8; |
| return IsBigEndianTarget ? bigEndianByteAt(LoadByteWidth, P.DestOffset) |
| : littleEndianByteAt(LoadByteWidth, P.DestOffset); |
| }; |
| |
| std::optional<BaseIndexOffset> Base; |
| SDValue Chain; |
| |
| SmallPtrSet<LoadSDNode *, 8> Loads; |
| std::optional<SDByteProvider> FirstByteProvider; |
| int64_t FirstOffset = INT64_MAX; |
| |
  // Check if all the bytes of the OR we are looking at are loaded from the
  // same base address. Collect byte offsets from the Base address in
  // ByteOffsets.
| SmallVector<int64_t, 8> ByteOffsets(ByteWidth); |
| unsigned ZeroExtendedBytes = 0; |
| for (int i = ByteWidth - 1; i >= 0; --i) { |
| auto P = |
| calculateByteProvider(SDValue(N, 0), i, 0, /*VectorIndex*/ std::nullopt, |
| /*StartingIndex*/ i); |
| if (!P) |
| return SDValue(); |
| |
| if (P->isConstantZero()) { |
      // It's OK for the N most significant bytes to be 0; we can just
      // zero-extend the load.
| if (++ZeroExtendedBytes != (ByteWidth - static_cast<unsigned>(i))) |
| return SDValue(); |
| continue; |
| } |
| assert(P->hasSrc() && "provenance should either be memory or zero"); |
| auto *L = cast<LoadSDNode>(P->Src.value()); |
| |
| // All loads must share the same chain |
| SDValue LChain = L->getChain(); |
| if (!Chain) |
| Chain = LChain; |
| else if (Chain != LChain) |
| return SDValue(); |
| |
| // Loads must share the same base address |
| BaseIndexOffset Ptr = BaseIndexOffset::match(L, DAG); |
| int64_t ByteOffsetFromBase = 0; |
| |
| // For vector loads, the expected load combine pattern will have an |
| // ExtractElement for each index in the vector. While each of these |
| // ExtractElements will be accessing the same base address as determined |
| // by the load instruction, the actual bytes they interact with will differ |
| // due to different ExtractElement indices. To accurately determine the |
| // byte position of an ExtractElement, we offset the base load ptr with |
| // the index multiplied by the byte size of each element in the vector. |
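    // E.g. for a load of v4i16, the element at index 2 starts at byte offset
    // 2 * 2 = 4 from the load's base address.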
| if (L->getMemoryVT().isVector()) { |
| unsigned LoadWidthInBit = L->getMemoryVT().getScalarSizeInBits(); |
| if (LoadWidthInBit % 8 != 0) |
| return SDValue(); |
| unsigned ByteOffsetFromVector = P->SrcOffset * LoadWidthInBit / 8; |
| Ptr.addToOffset(ByteOffsetFromVector); |
| } |
| |
| if (!Base) |
| Base = Ptr; |
| |
| else if (!Base->equalBaseIndex(Ptr, DAG, ByteOffsetFromBase)) |
| return SDValue(); |
| |
| // Calculate the offset of the current byte from the base address |
| ByteOffsetFromBase += MemoryByteOffset(*P); |
| ByteOffsets[i] = ByteOffsetFromBase; |
| |
| // Remember the first byte load |
| if (ByteOffsetFromBase < FirstOffset) { |
| FirstByteProvider = P; |
| FirstOffset = ByteOffsetFromBase; |
| } |
| |
| Loads.insert(L); |
| } |
| |
| assert(!Loads.empty() && "All the bytes of the value must be loaded from " |
| "memory, so there must be at least one load which produces the value"); |
| assert(Base && "Base address of the accessed memory location must be set"); |
| assert(FirstOffset != INT64_MAX && "First byte offset must be set"); |
| |
| bool NeedsZext = ZeroExtendedBytes > 0; |
| |
| EVT MemVT = |
| EVT::getIntegerVT(*DAG.getContext(), (ByteWidth - ZeroExtendedBytes) * 8); |
| |
| if (!MemVT.isSimple()) |
| return SDValue(); |
| |
  // Before legalize we can introduce too-wide illegal loads which will later
  // be split into legally sized loads. This enables us to combine i64-by-i8
  // load patterns into a couple of i32 loads on 32-bit targets.
| if (LegalOperations && |
| !TLI.isLoadExtLegal(NeedsZext ? ISD::ZEXTLOAD : ISD::NON_EXTLOAD, VT, |
| MemVT)) |
| return SDValue(); |
| |
  // Check if the bytes of the OR we are looking at match either a big- or
  // little-endian value load.
| std::optional<bool> IsBigEndian = isBigEndian( |
| ArrayRef(ByteOffsets).drop_back(ZeroExtendedBytes), FirstOffset); |
| if (!IsBigEndian) |
| return SDValue(); |
| |
| assert(FirstByteProvider && "must be set"); |
| |
  // Ensure that the first byte is loaded from the zero offset of the first
  // load, so that the combined value can be loaded from the first load's
  // address.
| if (MemoryByteOffset(*FirstByteProvider) != 0) |
| return SDValue(); |
| auto *FirstLoad = cast<LoadSDNode>(FirstByteProvider->Src.value()); |
| |
| // The node we are looking at matches with the pattern, check if we can |
| // replace it with a single (possibly zero-extended) load and bswap + shift if |
| // needed. |
| |
  // If the load needs a byte swap, check if the target supports it.
| bool NeedsBswap = IsBigEndianTarget != *IsBigEndian; |
| |
| // Before legalize we can introduce illegal bswaps which will be later |
| // converted to an explicit bswap sequence. This way we end up with a single |
| // load and byte shuffling instead of several loads and byte shuffling. |
| // We do not introduce illegal bswaps when zero-extending as this tends to |
| // introduce too many arithmetic instructions. |
| if (NeedsBswap && (LegalOperations || NeedsZext) && |
| !TLI.isOperationLegal(ISD::BSWAP, VT)) |
| return SDValue(); |
| |
| // If we need to bswap and zero extend, we have to insert a shift. Check that |
| // it is legal. |
| if (NeedsBswap && NeedsZext && LegalOperations && |
| !TLI.isOperationLegal(ISD::SHL, VT)) |
| return SDValue(); |
| |
| // Check that a load of the wide type is both allowed and fast on the target |
| unsigned Fast = 0; |
| bool Allowed = |
| TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT, |
| *FirstLoad->getMemOperand(), &Fast); |
| if (!Allowed || !Fast) |
| return SDValue(); |
| |
| SDValue NewLoad = |
| DAG.getExtLoad(NeedsZext ? ISD::ZEXTLOAD : ISD::NON_EXTLOAD, SDLoc(N), VT, |
| Chain, FirstLoad->getBasePtr(), |
| FirstLoad->getPointerInfo(), MemVT, FirstLoad->getAlign()); |
| |
| // Transfer chain users from old loads to the new load. |
| for (LoadSDNode *L : Loads) |
| DAG.makeEquivalentMemoryOrdering(L, NewLoad); |
| |
| if (!NeedsBswap) |
| return NewLoad; |
| |
| SDValue ShiftedLoad = |
| NeedsZext ? DAG.getNode(ISD::SHL, SDLoc(N), VT, NewLoad, |
| DAG.getShiftAmountConstant(ZeroExtendedBytes * 8, |
| VT, SDLoc(N))) |
| : NewLoad; |
| return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, ShiftedLoad); |
| } |
| |
| // If the target has andn, bsl, or a similar bit-select instruction, |
| // we want to unfold masked merge, with canonical pattern of: |
| // | A | |B| |
| // ((x ^ y) & m) ^ y |
| // | D | |
| // Into: |
| // (x & m) | (y & ~m) |
| // If y is a constant, m is not a 'not', and the 'andn' does not work with |
| // immediates, we unfold into a different pattern: |
| // ~(~x & m) & (m | y) |
| // If x is a constant, m is a 'not', and the 'andn' does not work with |
| // immediates, we unfold into a different pattern: |
| // (x | ~m) & ~(~m & ~y) |
// NOTE: we don't unfold the pattern if 'xor' is actually a 'not', because at
// the very least that breaks andnpd / andnps patterns, and because those
// patterns are simplified in IR and shouldn't be created in the DAG.
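// For example, with x = 0b1100, y = 0b1010, m = 0b0110:
//   ((x ^ y) & m) ^ y  = (0b0110 & 0b0110) ^ 0b1010 = 0b1100
//   (x & m) | (y & ~m) = 0b0100 | 0b1000            = 0b1100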
| SDValue DAGCombiner::unfoldMaskedMerge(SDNode *N) { |
| assert(N->getOpcode() == ISD::XOR); |
| |
| // Don't touch 'not' (i.e. where y = -1). |
| if (isAllOnesOrAllOnesSplat(N->getOperand(1))) |
| return SDValue(); |
| |
| EVT VT = N->getValueType(0); |
| |
| // There are 3 commutable operators in the pattern, |
| // so we have to deal with 8 possible variants of the basic pattern. |
| SDValue X, Y, M; |
| auto matchAndXor = [&X, &Y, &M](SDValue And, unsigned XorIdx, SDValue Other) { |
| if (And.getOpcode() != ISD::AND || !And.hasOneUse()) |
| return false; |
| SDValue Xor = And.getOperand(XorIdx); |
| if (Xor.getOpcode() != ISD::XOR || !Xor.hasOneUse()) |
| return false; |
| SDValue Xor0 = Xor.getOperand(0); |
| SDValue Xor1 = Xor.getOperand(1); |
| // Don't touch 'not' (i.e. where y = -1). |
| if (isAllOnesOrAllOnesSplat(Xor1)) |
| return false; |
| if (Other == Xor0) |
| std::swap(Xor0, Xor1); |
| if (Other != Xor1) |
| return false; |
| X = Xor0; |
| Y = Xor1; |
| M = And.getOperand(XorIdx ? 0 : 1); |
| return true; |
| }; |
| |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| if (!matchAndXor(N0, 0, N1) && !matchAndXor(N0, 1, N1) && |
| !matchAndXor(N1, 0, N0) && !matchAndXor(N1, 1, N0)) |
| return SDValue(); |
| |
  // Don't do anything if the mask is constant. This should not be reachable.
  // InstCombine should have already unfolded this pattern, and DAGCombiner
  // probably shouldn't produce it either.
| if (isa<ConstantSDNode>(M.getNode())) |
| return SDValue(); |
| |
| // We can transform if the target has AndNot |
| if (!TLI.hasAndNot(M)) |
| return SDValue(); |
| |
| SDLoc DL(N); |
| |
  // If Y is a constant, check that 'andn' works with immediates, unless M is
  // a bitwise not, which would already allow ANDN to be used.
| if (!TLI.hasAndNot(Y) && !isBitwiseNot(M)) { |
| assert(TLI.hasAndNot(X) && "Only mask is a variable? Unreachable."); |
| // If not, we need to do a bit more work to make sure andn is still used. |
| SDValue NotX = DAG.getNOT(DL, X, VT); |
| SDValue LHS = DAG.getNode(ISD::AND, DL, VT, NotX, M); |
| SDValue NotLHS = DAG.getNOT(DL, LHS, VT); |
| SDValue RHS = DAG.getNode(ISD::OR, DL, VT, M, Y); |
| return DAG.getNode(ISD::AND, DL, VT, NotLHS, RHS); |
| } |
| |
| // If X is a constant and M is a bitwise not, check that 'andn' works with |
| // immediates. |
| if (!TLI.hasAndNot(X) && isBitwiseNot(M)) { |
| assert(TLI.hasAndNot(Y) && "Only mask is a variable? Unreachable."); |
| // If not, we need to do a bit more work to make sure andn is still used. |
| SDValue NotM = M.getOperand(0); |
| SDValue LHS = DAG.getNode(ISD::OR, DL, VT, X, NotM); |
| SDValue NotY = DAG.getNOT(DL, Y, VT); |
| SDValue RHS = DAG.getNode(ISD::AND, DL, VT, NotM, NotY); |
| SDValue NotRHS = DAG.getNOT(DL, RHS, VT); |
| return DAG.getNode(ISD::AND, DL, VT, LHS, NotRHS); |
| } |
| |
| SDValue LHS = DAG.getNode(ISD::AND, DL, VT, X, M); |
| SDValue NotM = DAG.getNOT(DL, M, VT); |
| SDValue RHS = DAG.getNode(ISD::AND, DL, VT, Y, NotM); |
| |
| return DAG.getNode(ISD::OR, DL, VT, LHS, RHS); |
| } |
| |
| SDValue DAGCombiner::visitXOR(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N0.getValueType(); |
| SDLoc DL(N); |
| |
| // fold (xor undef, undef) -> 0. This is a common idiom (misuse). |
| if (N0.isUndef() && N1.isUndef()) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (xor x, undef) -> undef |
| if (N0.isUndef()) |
| return N0; |
| if (N1.isUndef()) |
| return N1; |
| |
| // fold (xor c1, c2) -> c1^c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::XOR, DL, VT, {N0, N1})) |
| return C; |
| |
| // canonicalize constant to RHS |
| if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && |
| !DAG.isConstantIntBuildVectorOrConstantInt(N1)) |
| return DAG.getNode(ISD::XOR, DL, VT, N1, N0); |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| // fold (xor x, 0) -> x, vector edition |
| if (ISD::isConstantSplatVectorAllZeros(N1.getNode())) |
| return N0; |
| } |
| |
| // fold (xor x, 0) -> x |
| if (isNullConstant(N1)) |
| return N0; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // reassociate xor |
| if (SDValue RXOR = reassociateOps(ISD::XOR, DL, N0, N1, N->getFlags())) |
| return RXOR; |
| |
| // Fold xor(vecreduce(x), vecreduce(y)) -> vecreduce(xor(x, y)) |
| if (SDValue SD = |
| reassociateReduction(ISD::VECREDUCE_XOR, ISD::XOR, DL, VT, N0, N1)) |
| return SD; |
| |
| // fold (a^b) -> (a|b) iff a and b share no bits. |
| if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) && |
| DAG.haveNoCommonBitsSet(N0, N1)) |
| return DAG.getNode(ISD::OR, DL, VT, N0, N1, SDNodeFlags::Disjoint); |
| |
| // look for 'add-like' folds: |
| // XOR(N0,MIN_SIGNED_VALUE) == ADD(N0,MIN_SIGNED_VALUE) |
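  // (Adding the sign bit is the same as flipping it, because the carry out of
  // the top bit is discarded.)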
| if ((!LegalOperations || TLI.isOperationLegal(ISD::ADD, VT)) && |
| isMinSignedConstant(N1)) |
| if (SDValue Combined = visitADDLike(N)) |
| return Combined; |
| |
| // fold !(x cc y) -> (x !cc y) |
| unsigned N0Opcode = N0.getOpcode(); |
| SDValue LHS, RHS, CC; |
| if (TLI.isConstTrueVal(N1) && |
| isSetCCEquivalent(N0, LHS, RHS, CC, /*MatchStrict*/ true)) { |
| ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), |
| LHS.getValueType()); |
| if (!LegalOperations || |
| TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) { |
| switch (N0Opcode) { |
| default: |
| llvm_unreachable("Unhandled SetCC Equivalent!"); |
| case ISD::SETCC: |
| return DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC); |
| case ISD::SELECT_CC: |
| return DAG.getSelectCC(SDLoc(N0), LHS, RHS, N0.getOperand(2), |
| N0.getOperand(3), NotCC); |
| case ISD::STRICT_FSETCC: |
| case ISD::STRICT_FSETCCS: { |
| if (N0.hasOneUse()) { |
| // FIXME Can we handle multiple uses? Could we token factor the chain |
| // results from the new/old setcc? |
| SDValue SetCC = |
| DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC, |
| N0.getOperand(0), N0Opcode == ISD::STRICT_FSETCCS); |
| CombineTo(N, SetCC); |
| DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), SetCC.getValue(1)); |
| recursivelyDeleteUnusedNodes(N0.getNode()); |
| return SDValue(N, 0); // Return N so it doesn't get rechecked! |
| } |
| break; |
| } |
| } |
| } |
| } |
| |
| // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y))) |
| if (isOneConstant(N1) && N0Opcode == ISD::ZERO_EXTEND && N0.hasOneUse() && |
| isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){ |
| SDValue V = N0.getOperand(0); |
| SDLoc DL0(N0); |
| V = DAG.getNode(ISD::XOR, DL0, V.getValueType(), V, |
| DAG.getConstant(1, DL0, V.getValueType())); |
| AddToWorklist(V.getNode()); |
| return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, V); |
| } |
| |
| // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc |
| // fold (not (and x, y)) -> (or (not x), (not y)) iff x or y are setcc |
| if (isOneConstant(N1) && VT == MVT::i1 && N0.hasOneUse() && |
| (N0Opcode == ISD::OR || N0Opcode == ISD::AND)) { |
| SDValue N00 = N0.getOperand(0), N01 = N0.getOperand(1); |
| if (isOneUseSetCC(N01) || isOneUseSetCC(N00)) { |
| unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND; |
| N00 = DAG.getNode(ISD::XOR, SDLoc(N00), VT, N00, N1); // N00 = ~N00 |
| N01 = DAG.getNode(ISD::XOR, SDLoc(N01), VT, N01, N1); // N01 = ~N01 |
| AddToWorklist(N00.getNode()); AddToWorklist(N01.getNode()); |
| return DAG.getNode(NewOpcode, DL, VT, N00, N01); |
| } |
| } |
| // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants |
| // fold (not (and x, y)) -> (or (not x), (not y)) iff x or y are constants |
| if (isAllOnesConstant(N1) && N0.hasOneUse() && |
| (N0Opcode == ISD::OR || N0Opcode == ISD::AND)) { |
| SDValue N00 = N0.getOperand(0), N01 = N0.getOperand(1); |
| if (isa<ConstantSDNode>(N01) || isa<ConstantSDNode>(N00)) { |
| unsigned NewOpcode = N0Opcode == ISD::AND ? ISD::OR : ISD::AND; |
| N00 = DAG.getNode(ISD::XOR, SDLoc(N00), VT, N00, N1); // N00 = ~N00 |
| N01 = DAG.getNode(ISD::XOR, SDLoc(N01), VT, N01, N1); // N01 = ~N01 |
| AddToWorklist(N00.getNode()); AddToWorklist(N01.getNode()); |
| return DAG.getNode(NewOpcode, DL, VT, N00, N01); |
| } |
| } |
| |
| // fold (not (neg x)) -> (add X, -1) |
| // FIXME: This can be generalized to (not (sub Y, X)) -> (add X, ~Y) if |
| // Y is a constant or the subtract has a single use. |
| if (isAllOnesConstant(N1) && N0.getOpcode() == ISD::SUB && |
| isNullConstant(N0.getOperand(0))) { |
| return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(1), |
| DAG.getAllOnesConstant(DL, VT)); |
| } |
| |
| // fold (not (add X, -1)) -> (neg X) |
| if (N0.getOpcode() == ISD::ADD && N0.hasOneUse() && isAllOnesConstant(N1) && |
| isAllOnesOrAllOnesSplat(N0.getOperand(1))) { |
| return DAG.getNegative(N0.getOperand(0), DL, VT); |
| } |
| |
| // fold (xor (and x, y), y) -> (and (not x), y) |
| if (N0Opcode == ISD::AND && N0.hasOneUse() && N0->getOperand(1) == N1) { |
| SDValue X = N0.getOperand(0); |
| SDValue NotX = DAG.getNOT(SDLoc(X), X, VT); |
| AddToWorklist(NotX.getNode()); |
| return DAG.getNode(ISD::AND, DL, VT, NotX, N1); |
| } |
| |
| // fold Y = sra (X, size(X)-1); xor (add (X, Y), Y) -> (abs X) |
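  // (Y is all-ones exactly when X is negative, so (X + Y) ^ Y computes
  // ~(X - 1) == -X in that case and is a no-op otherwise -- the classic
  // branchless abs.)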
| if (!LegalOperations || hasOperation(ISD::ABS, VT)) { |
| SDValue A = N0Opcode == ISD::ADD ? N0 : N1; |
| SDValue S = N0Opcode == ISD::SRA ? N0 : N1; |
| if (A.getOpcode() == ISD::ADD && S.getOpcode() == ISD::SRA) { |
| SDValue A0 = A.getOperand(0), A1 = A.getOperand(1); |
| SDValue S0 = S.getOperand(0); |
| if ((A0 == S && A1 == S0) || (A1 == S && A0 == S0)) |
| if (ConstantSDNode *C = isConstOrConstSplat(S.getOperand(1))) |
| if (C->getAPIntValue() == (VT.getScalarSizeInBits() - 1)) |
| return DAG.getNode(ISD::ABS, DL, VT, S0); |
| } |
| } |
| |
| // fold (xor x, x) -> 0 |
| if (N0 == N1) |
| return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations); |
| |
| // fold (xor (shl 1, x), -1) -> (rotl ~1, x) |
| // Here is a concrete example of this equivalence: |
| // i16 x == 14 |
| // i16 shl == 1 << 14 == 16384 == 0b0100000000000000 |
| // i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111 |
| // |
| // => |
| // |
| // i16 ~1 == 0b1111111111111110 |
| // i16 rol(~1, 14) == 0b1011111111111111 |
| // |
| // Some additional tips to help conceptualize this transform: |
| // - Try to see the operation as placing a single zero in a value of all ones. |
| // - There exists no value for x which would allow the result to contain zero. |
| // - Values of x larger than the bitwidth are undefined and do not require a |
| // consistent result. |
  // - Pushing the zero left requires shifting one-bits in from the right.
| // A rotate left of ~1 is a nice way of achieving the desired result. |
| if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0Opcode == ISD::SHL && |
| isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) { |
| return DAG.getNode(ISD::ROTL, DL, VT, DAG.getSignedConstant(~1, DL, VT), |
| N0.getOperand(1)); |
| } |
| |
| // Simplify: xor (op x...), (op y...) -> (op (xor x, y)) |
| if (N0Opcode == N1.getOpcode()) |
| if (SDValue V = hoistLogicOpWithSameOpcodeHands(N)) |
| return V; |
| |
| if (SDValue R = foldLogicOfShifts(N, N0, N1, DAG)) |
| return R; |
| if (SDValue R = foldLogicOfShifts(N, N1, N0, DAG)) |
| return R; |
| if (SDValue R = foldLogicTreeOfShifts(N, N0, N1, DAG)) |
| return R; |
| |
| // Unfold ((x ^ y) & m) ^ y into (x & m) | (y & ~m) if profitable |
| if (SDValue MM = unfoldMaskedMerge(N)) |
| return MM; |
| |
| // Simplify the expression using non-local knowledge. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| if (SDValue Combined = combineCarryDiamond(DAG, TLI, N0, N1, N)) |
| return Combined; |
| |
| return SDValue(); |
| } |
| |
| /// If we have a shift-by-constant of a bitwise logic op that itself has a |
| /// shift-by-constant operand with identical opcode, we may be able to convert |
| /// that into 2 independent shifts followed by the logic op. This is a |
| /// throughput improvement. |
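/// E.g. (srl (xor (srl X, 1), Y), 2) --> (xor (srl X, 3), (srl Y, 2)), letting
/// the two new shifts execute in parallel.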
| static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG) { |
| // Match a one-use bitwise logic op. |
| SDValue LogicOp = Shift->getOperand(0); |
| if (!LogicOp.hasOneUse()) |
| return SDValue(); |
| |
| unsigned LogicOpcode = LogicOp.getOpcode(); |
| if (LogicOpcode != ISD::AND && LogicOpcode != ISD::OR && |
| LogicOpcode != ISD::XOR) |
| return SDValue(); |
| |
| // Find a matching one-use shift by constant. |
| unsigned ShiftOpcode = Shift->getOpcode(); |
| SDValue C1 = Shift->getOperand(1); |
| ConstantSDNode *C1Node = isConstOrConstSplat(C1); |
| assert(C1Node && "Expected a shift with constant operand"); |
| const APInt &C1Val = C1Node->getAPIntValue(); |
| auto matchFirstShift = [&](SDValue V, SDValue &ShiftOp, |
| const APInt *&ShiftAmtVal) { |
| if (V.getOpcode() != ShiftOpcode || !V.hasOneUse()) |
| return false; |
| |
| ConstantSDNode *ShiftCNode = isConstOrConstSplat(V.getOperand(1)); |
| if (!ShiftCNode) |
| return false; |
| |
| // Capture the shifted operand and shift amount value. |
| ShiftOp = V.getOperand(0); |
| ShiftAmtVal = &ShiftCNode->getAPIntValue(); |
| |
| // Shift amount types do not have to match their operand type, so check that |
| // the constants are the same width. |
| if (ShiftAmtVal->getBitWidth() != C1Val.getBitWidth()) |
| return false; |
| |
| // The fold is not valid if the sum of the shift values doesn't fit in the |
| // given shift amount type. |
| bool Overflow = false; |
| APInt NewShiftAmt = C1Val.uadd_ov(*ShiftAmtVal, Overflow); |
| if (Overflow) |
| return false; |
| |
    // The fold is not valid if the sum of the shift values exceeds the
    // bitwidth.
| if (NewShiftAmt.uge(V.getScalarValueSizeInBits())) |
| return false; |
| |
| return true; |
| }; |
| |
| // Logic ops are commutative, so check each operand for a match. |
| SDValue X, Y; |
| const APInt *C0Val; |
| if (matchFirstShift(LogicOp.getOperand(0), X, C0Val)) |
| Y = LogicOp.getOperand(1); |
| else if (matchFirstShift(LogicOp.getOperand(1), X, C0Val)) |
| Y = LogicOp.getOperand(0); |
| else |
| return SDValue(); |
| |
| // shift (logic (shift X, C0), Y), C1 -> logic (shift X, C0+C1), (shift Y, C1) |
| SDLoc DL(Shift); |
| EVT VT = Shift->getValueType(0); |
| EVT ShiftAmtVT = Shift->getOperand(1).getValueType(); |
| SDValue ShiftSumC = DAG.getConstant(*C0Val + C1Val, DL, ShiftAmtVT); |
| SDValue NewShift1 = DAG.getNode(ShiftOpcode, DL, VT, X, ShiftSumC); |
| SDValue NewShift2 = DAG.getNode(ShiftOpcode, DL, VT, Y, C1); |
| return DAG.getNode(LogicOpcode, DL, VT, NewShift1, NewShift2, |
| LogicOp->getFlags()); |
| } |
| |
| /// Handle transforms common to the three shifts, when the shift amount is a |
| /// constant. |
| /// We are looking for: (shift being one of shl/sra/srl) |
| /// shift (binop X, C0), C1 |
| /// And want to transform into: |
| /// binop (shift X, C1), (shift C0, C1) |
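/// E.g. (shl (or X, 0xF0), 8) --> (or (shl X, 8), 0xF000).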
| SDValue DAGCombiner::visitShiftByConstant(SDNode *N) { |
| assert(isConstOrConstSplat(N->getOperand(1)) && "Expected constant operand"); |
| |
| // Do not turn a 'not' into a regular xor. |
| if (isBitwiseNot(N->getOperand(0))) |
| return SDValue(); |
| |
| // The inner binop must be one-use, since we want to replace it. |
| SDValue LHS = N->getOperand(0); |
| if (!LHS.hasOneUse() || !TLI.isDesirableToCommuteWithShift(N, Level)) |
| return SDValue(); |
| |
| // Fold shift(bitop(shift(x,c1),y), c2) -> bitop(shift(x,c1+c2),shift(y,c2)). |
| if (SDValue R = combineShiftOfShiftedLogic(N, DAG)) |
| return R; |
| |
| // We want to pull some binops through shifts, so that we have (and (shift)) |
| // instead of (shift (and)), likewise for add, or, xor, etc. This sort of |
| // thing happens with address calculations, so it's important to canonicalize |
| // it. |
| switch (LHS.getOpcode()) { |
| default: |
| return SDValue(); |
| case ISD::OR: |
| case ISD::XOR: |
| case ISD::AND: |
| break; |
| case ISD::ADD: |
| if (N->getOpcode() != ISD::SHL) |
| return SDValue(); // only shl(add) not sr[al](add). |
| break; |
| } |
| |
  // FIXME: disable this unless the input to the binop is a shift by a constant
  // or is a copy/select. Enable this in other cases once we figure out when it
  // is exactly profitable.
| SDValue BinOpLHSVal = LHS.getOperand(0); |
| bool IsShiftByConstant = (BinOpLHSVal.getOpcode() == ISD::SHL || |
| BinOpLHSVal.getOpcode() == ISD::SRA || |
| BinOpLHSVal.getOpcode() == ISD::SRL) && |
| isa<ConstantSDNode>(BinOpLHSVal.getOperand(1)); |
| bool IsCopyOrSelect = BinOpLHSVal.getOpcode() == ISD::CopyFromReg || |
| BinOpLHSVal.getOpcode() == ISD::SELECT; |
| |
| if (!IsShiftByConstant && !IsCopyOrSelect) |
| return SDValue(); |
| |
| if (IsCopyOrSelect && N->hasOneUse()) |
| return SDValue(); |
| |
| // Attempt to fold the constants, shifting the binop RHS by the shift amount. |
| SDLoc DL(N); |
| EVT VT = N->getValueType(0); |
| if (SDValue NewRHS = DAG.FoldConstantArithmetic( |
| N->getOpcode(), DL, VT, {LHS.getOperand(1), N->getOperand(1)})) { |
| SDValue NewShift = DAG.getNode(N->getOpcode(), DL, VT, LHS.getOperand(0), |
| N->getOperand(1)); |
| return DAG.getNode(LHS.getOpcode(), DL, VT, NewShift, NewRHS); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) { |
| assert(N->getOpcode() == ISD::TRUNCATE); |
| assert(N->getOperand(0).getOpcode() == ISD::AND); |
| |
| // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC) |
| EVT TruncVT = N->getValueType(0); |
| if (N->hasOneUse() && N->getOperand(0).hasOneUse() && |
| TLI.isTypeDesirableForOp(ISD::AND, TruncVT)) { |
| SDValue N01 = N->getOperand(0).getOperand(1); |
| if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) { |
| SDLoc DL(N); |
| SDValue N00 = N->getOperand(0).getOperand(0); |
| SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00); |
| SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01); |
| AddToWorklist(Trunc00.getNode()); |
| AddToWorklist(Trunc01.getNode()); |
| return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitRotate(SDNode *N) { |
| SDLoc dl(N); |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| EVT VT = N->getValueType(0); |
| unsigned Bitsize = VT.getScalarSizeInBits(); |
| |
| // fold (rot x, 0) -> x |
| if (isNullOrNullSplat(N1)) |
| return N0; |
| |
| // fold (rot x, c) -> x iff (c % BitSize) == 0 |
| if (isPowerOf2_32(Bitsize) && Bitsize > 1) { |
| APInt ModuloMask(N1.getScalarValueSizeInBits(), Bitsize - 1); |
| if (DAG.MaskedValueIsZero(N1, ModuloMask)) |
| return N0; |
| } |
| |
| // fold (rot x, c) -> (rot x, c % BitSize) |
| bool OutOfRange = false; |
| auto MatchOutOfRange = [Bitsize, &OutOfRange](ConstantSDNode *C) { |
| OutOfRange |= C->getAPIntValue().uge(Bitsize); |
| return true; |
| }; |
| if (ISD::matchUnaryPredicate(N1, MatchOutOfRange) && OutOfRange) { |
| EVT AmtVT = N1.getValueType(); |
| SDValue Bits = DAG.getConstant(Bitsize, dl, AmtVT); |
| if (SDValue Amt = |
| DAG.FoldConstantArithmetic(ISD::UREM, dl, AmtVT, {N1, Bits})) |
| return DAG.getNode(N->getOpcode(), dl, VT, N0, Amt); |
| } |
| |
| // rot i16 X, 8 --> bswap X |
| auto *RotAmtC = isConstOrConstSplat(N1); |
| if (RotAmtC && RotAmtC->getAPIntValue() == 8 && |
| VT.getScalarSizeInBits() == 16 && hasOperation(ISD::BSWAP, VT)) |
| return DAG.getNode(ISD::BSWAP, dl, VT, N0); |
| |
| // Simplify the operands using demanded-bits information. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))). |
| if (N1.getOpcode() == ISD::TRUNCATE && |
| N1.getOperand(0).getOpcode() == ISD::AND) { |
| if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) |
| return DAG.getNode(N->getOpcode(), dl, VT, N0, NewOp1); |
| } |
| |
| unsigned NextOp = N0.getOpcode(); |
| |
| // fold (rot* (rot* x, c2), c1) |
| // -> (rot* x, ((c1 % bitsize) +- (c2 % bitsize) + bitsize) % bitsize) |
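  // E.g. for i8:
  //   (rotl (rotr x, 3), 7) -> (rotl x, (7 - 3 + 8) % 8) = (rotl x, 4)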
| if (NextOp == ISD::ROTL || NextOp == ISD::ROTR) { |
| bool C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1); |
| bool C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)); |
| if (C1 && C2 && N1.getValueType() == N0.getOperand(1).getValueType()) { |
| EVT ShiftVT = N1.getValueType(); |
| bool SameSide = (N->getOpcode() == NextOp); |
| unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB; |
| SDValue BitsizeC = DAG.getConstant(Bitsize, dl, ShiftVT); |
| SDValue Norm1 = DAG.FoldConstantArithmetic(ISD::UREM, dl, ShiftVT, |
| {N1, BitsizeC}); |
| SDValue Norm2 = DAG.FoldConstantArithmetic(ISD::UREM, dl, ShiftVT, |
| {N0.getOperand(1), BitsizeC}); |
| if (Norm1 && Norm2) |
| if (SDValue CombinedShift = DAG.FoldConstantArithmetic( |
| CombineOp, dl, ShiftVT, {Norm1, Norm2})) { |
| CombinedShift = DAG.FoldConstantArithmetic(ISD::ADD, dl, ShiftVT, |
| {CombinedShift, BitsizeC}); |
| SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic( |
| ISD::UREM, dl, ShiftVT, {CombinedShift, BitsizeC}); |
| return DAG.getNode(N->getOpcode(), dl, VT, N0->getOperand(0), |
| CombinedShiftNorm); |
| } |
| } |
| } |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSHL(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| if (SDValue V = DAG.simplifyShift(N0, N1)) |
| return V; |
| |
| SDLoc DL(N); |
| EVT VT = N0.getValueType(); |
| EVT ShiftVT = N1.getValueType(); |
| unsigned OpSizeInBits = VT.getScalarSizeInBits(); |
| |
| // fold (shl c1, c2) -> c1<<c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, DL, VT, {N0, N1})) |
| return C; |
| |
| // fold vector ops |
| if (VT.isVector()) { |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1); |
    // If setcc produces an all-ones true value then:
    // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
| if (N1CV && N1CV->isConstant()) { |
| if (N0.getOpcode() == ISD::AND) { |
| SDValue N00 = N0->getOperand(0); |
| SDValue N01 = N0->getOperand(1); |
| BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01); |
| |
| if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC && |
| TLI.getBooleanContents(N00.getOperand(0).getValueType()) == |
| TargetLowering::ZeroOrNegativeOneBooleanContent) { |
| if (SDValue C = |
| DAG.FoldConstantArithmetic(ISD::SHL, DL, VT, {N01, N1})) |
| return DAG.getNode(ISD::AND, DL, VT, N00, C); |
| } |
| } |
| } |
| } |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // if (shl x, c) is known to be zero, return 0 |
| if (DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnes(OpSizeInBits))) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))). |
| if (N1.getOpcode() == ISD::TRUNCATE && |
| N1.getOperand(0).getOpcode() == ISD::AND) { |
| if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) |
| return DAG.getNode(ISD::SHL, DL, VT, N0, NewOp1); |
| } |
| |
| // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2)) |
| if (N0.getOpcode() == ISD::SHL) { |
| auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); |
| return (c1 + c2).uge(OpSizeInBits); |
| }; |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange)) |
| return DAG.getConstant(0, DL, VT); |
| |
| auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); |
| return (c1 + c2).ult(OpSizeInBits); |
| }; |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) { |
| SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1)); |
| return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Sum); |
| } |
| } |
| |
| // fold (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2)) |
| // For this to be valid, the second form must not preserve any of the bits |
| // that are shifted out by the inner shift in the first form. This means |
| // the outer shift size must be >= the number of bits added by the ext. |
| // As a corollary, we don't care what kind of ext it is. |
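  // E.g. (shl (zext i16 (shl X, 2) to i32), 16)
  //        -> (shl (zext i16 X to i32), 18)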
| if ((N0.getOpcode() == ISD::ZERO_EXTEND || |
| N0.getOpcode() == ISD::ANY_EXTEND || |
| N0.getOpcode() == ISD::SIGN_EXTEND) && |
| N0.getOperand(0).getOpcode() == ISD::SHL) { |
| SDValue N0Op0 = N0.getOperand(0); |
| SDValue InnerShiftAmt = N0Op0.getOperand(1); |
| EVT InnerVT = N0Op0.getValueType(); |
| uint64_t InnerBitwidth = InnerVT.getScalarSizeInBits(); |
| |
| auto MatchOutOfRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); |
| return c2.uge(OpSizeInBits - InnerBitwidth) && |
| (c1 + c2).uge(OpSizeInBits); |
| }; |
| if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchOutOfRange, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) |
| return DAG.getConstant(0, DL, VT); |
| |
| auto MatchInRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); |
| return c2.uge(OpSizeInBits - InnerBitwidth) && |
| (c1 + c2).ult(OpSizeInBits); |
| }; |
| if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchInRange, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| SDValue Ext = DAG.getNode(N0.getOpcode(), DL, VT, N0Op0.getOperand(0)); |
| SDValue Sum = DAG.getZExtOrTrunc(InnerShiftAmt, DL, ShiftVT); |
| Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, Sum, N1); |
| return DAG.getNode(ISD::SHL, DL, VT, Ext, Sum); |
| } |
| } |
| |
| // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C)) |
| // Only fold this if the inner zext has no other uses to avoid increasing |
| // the total number of instructions. |
| if (N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() && |
| N0.getOperand(0).getOpcode() == ISD::SRL) { |
| SDValue N0Op0 = N0.getOperand(0); |
| SDValue InnerShiftAmt = N0Op0.getOperand(1); |
| |
| auto MatchEqual = [VT](ConstantSDNode *LHS, ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2); |
| return c1.ult(VT.getScalarSizeInBits()) && (c1 == c2); |
| }; |
| if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchEqual, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| EVT InnerShiftAmtVT = N0Op0.getOperand(1).getValueType(); |
| SDValue NewSHL = DAG.getZExtOrTrunc(N1, DL, InnerShiftAmtVT); |
| NewSHL = DAG.getNode(ISD::SHL, DL, N0Op0.getValueType(), N0Op0, NewSHL); |
| AddToWorklist(NewSHL.getNode()); |
| return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL); |
| } |
| } |
| |
| if (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) { |
| auto MatchShiftAmount = [OpSizeInBits](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| const APInt &LHSC = LHS->getAPIntValue(); |
| const APInt &RHSC = RHS->getAPIntValue(); |
| return LHSC.ult(OpSizeInBits) && RHSC.ult(OpSizeInBits) && |
| LHSC.getZExtValue() <= RHSC.getZExtValue(); |
| }; |
| |
| // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2 |
| // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 >= C2 |
| if (N0->getFlags().hasExact()) { |
| if (ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchShiftAmount, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| SDValue N01 = DAG.getZExtOrTrunc(N0.getOperand(1), DL, ShiftVT); |
| SDValue Diff = DAG.getNode(ISD::SUB, DL, ShiftVT, N1, N01); |
| return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Diff); |
| } |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchShiftAmount, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| SDValue N01 = DAG.getZExtOrTrunc(N0.getOperand(1), DL, ShiftVT); |
| SDValue Diff = DAG.getNode(ISD::SUB, DL, ShiftVT, N01, N1); |
| return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0), Diff); |
| } |
| } |
| |
    // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
    //                               (and (srl x, (sub c1, c2)), MASK)
| // Only fold this if the inner shift has no other uses -- if it does, |
| // folding this will increase the total number of instructions. |
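    // E.g. on i32, (shl (srl x, 4), 6) -> (and (shl x, 2), 0xFFFFFFC0).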
| if (N0.getOpcode() == ISD::SRL && |
| (N0.getOperand(1) == N1 || N0.hasOneUse()) && |
| TLI.shouldFoldConstantShiftPairToMask(N, Level)) { |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchShiftAmount, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| SDValue N01 = DAG.getZExtOrTrunc(N0.getOperand(1), DL, ShiftVT); |
| SDValue Diff = DAG.getNode(ISD::SUB, DL, ShiftVT, N01, N1); |
| SDValue Mask = DAG.getAllOnesConstant(DL, VT); |
| Mask = DAG.getNode(ISD::SHL, DL, VT, Mask, N01); |
| Mask = DAG.getNode(ISD::SRL, DL, VT, Mask, Diff); |
| SDValue Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Diff); |
| return DAG.getNode(ISD::AND, DL, VT, Shift, Mask); |
| } |
| if (ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchShiftAmount, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| SDValue N01 = DAG.getZExtOrTrunc(N0.getOperand(1), DL, ShiftVT); |
| SDValue Diff = DAG.getNode(ISD::SUB, DL, ShiftVT, N1, N01); |
| SDValue Mask = DAG.getAllOnesConstant(DL, VT); |
| Mask = DAG.getNode(ISD::SHL, DL, VT, Mask, N1); |
| SDValue Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Diff); |
| return DAG.getNode(ISD::AND, DL, VT, Shift, Mask); |
| } |
| } |
| } |
| |
| // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1)) |
| if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) && |
| isConstantOrConstantVector(N1, /* No Opaques */ true)) { |
| SDValue AllBits = DAG.getAllOnesConstant(DL, VT); |
| SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1); |
| return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask); |
| } |
| |
| // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) |
| // fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) |
  // Variant of the version done on multiply, except that a mul by a power of 2
  // is turned into a shift.
| if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR) && |
| TLI.isDesirableToCommuteWithShift(N, Level)) { |
| SDValue N01 = N0.getOperand(1); |
| if (SDValue Shl1 = |
| DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) { |
| SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1); |
| AddToWorklist(Shl0.getNode()); |
| SDNodeFlags Flags; |
| // Preserve the disjoint flag for Or. |
| if (N0.getOpcode() == ISD::OR && N0->getFlags().hasDisjoint()) |
| Flags |= SDNodeFlags::Disjoint; |
| return DAG.getNode(N0.getOpcode(), DL, VT, Shl0, Shl1, Flags); |
| } |
| } |
| |
| // fold (shl (sext (add_nsw x, c1)), c2) -> (add (shl (sext x), c2), c1 << c2) |
| // TODO: Add zext/add_nuw variant with suitable test coverage |
| // TODO: Should we limit this with isLegalAddImmediate? |
| if (N0.getOpcode() == ISD::SIGN_EXTEND && |
| N0.getOperand(0).getOpcode() == ISD::ADD && |
| N0.getOperand(0)->getFlags().hasNoSignedWrap() && |
| TLI.isDesirableToCommuteWithShift(N, Level)) { |
| SDValue Add = N0.getOperand(0); |
| SDLoc DL(N0); |
| if (SDValue ExtC = DAG.FoldConstantArithmetic(N0.getOpcode(), DL, VT, |
| {Add.getOperand(1)})) { |
| if (SDValue ShlC = |
| DAG.FoldConstantArithmetic(ISD::SHL, DL, VT, {ExtC, N1})) { |
| SDValue ExtX = DAG.getNode(N0.getOpcode(), DL, VT, Add.getOperand(0)); |
| SDValue ShlX = DAG.getNode(ISD::SHL, DL, VT, ExtX, N1); |
| return DAG.getNode(ISD::ADD, DL, VT, ShlX, ShlC); |
| } |
| } |
| } |
| |
| // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2) |
| if (N0.getOpcode() == ISD::MUL && N0->hasOneUse()) { |
| SDValue N01 = N0.getOperand(1); |
| if (SDValue Shl = |
| DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, {N01, N1})) |
| return DAG.getNode(ISD::MUL, DL, VT, N0.getOperand(0), Shl); |
| } |
| |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| if (N1C && !N1C->isOpaque()) |
| if (SDValue NewSHL = visitShiftByConstant(N)) |
| return NewSHL; |
| |
| // fold (shl X, cttz(Y)) -> (mul (Y & -Y), X) if cttz is unsupported on the |
| // target. |
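  // ((Y & -Y) isolates the lowest set bit of Y, which equals 1 << cttz(Y) for
  // nonzero Y, so the multiply reproduces the shift.)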
| if (((N1.getOpcode() == ISD::CTTZ && |
| VT.getScalarSizeInBits() <= ShiftVT.getScalarSizeInBits()) || |
| N1.getOpcode() == ISD::CTTZ_ZERO_UNDEF) && |
| N1.hasOneUse() && !TLI.isOperationLegalOrCustom(ISD::CTTZ, ShiftVT) && |
| TLI.isOperationLegalOrCustom(ISD::MUL, VT)) { |
| SDValue Y = N1.getOperand(0); |
| SDLoc DL(N); |
| SDValue NegY = DAG.getNegative(Y, DL, ShiftVT); |
| SDValue And = |
| DAG.getZExtOrTrunc(DAG.getNode(ISD::AND, DL, ShiftVT, Y, NegY), DL, VT); |
| return DAG.getNode(ISD::MUL, DL, VT, And, N0); |
| } |
| |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| // Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)). |
| if (N0.getOpcode() == ISD::VSCALE && N1C) { |
| const APInt &C0 = N0.getConstantOperandAPInt(0); |
| const APInt &C1 = N1C->getAPIntValue(); |
| return DAG.getVScale(DL, VT, C0 << C1); |
| } |
| |
| // Fold (shl step_vector(C0), C1) to (step_vector(C0 << C1)). |
| APInt ShlVal; |
| if (N0.getOpcode() == ISD::STEP_VECTOR && |
| ISD::isConstantSplatVector(N1.getNode(), ShlVal)) { |
| const APInt &C0 = N0.getConstantOperandAPInt(0); |
| if (ShlVal.ult(C0.getBitWidth())) { |
| APInt NewStep = C0 << ShlVal; |
| return DAG.getStepVector(DL, VT, NewStep); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| // Transform a right shift of a multiply into a multiply-high. |
| // Examples: |
| // (srl (mul (zext i32:$a to i64), (zext i32:$b to i64)), 32) -> (mulhu $a, $b) |
| // (sra (mul (sext i32:$a to i64), (sext i32:$b to i64)), 32) -> (mulhs $a, $b) |
| static SDValue combineShiftToMULH(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, |
| const TargetLowering &TLI) { |
| assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && |
| "SRL or SRA node is required here!"); |
| |
| // Check the shift amount. Proceed with the transformation if the shift |
| // amount is constant. |
| ConstantSDNode *ShiftAmtSrc = isConstOrConstSplat(N->getOperand(1)); |
| if (!ShiftAmtSrc) |
| return SDValue(); |
| |
| // The operation feeding into the shift must be a multiply. |
| SDValue ShiftOperand = N->getOperand(0); |
| if (ShiftOperand.getOpcode() != ISD::MUL) |
| return SDValue(); |
| |
| // Both operands must be equivalent extend nodes. |
| SDValue LeftOp = ShiftOperand.getOperand(0); |
| SDValue RightOp = ShiftOperand.getOperand(1); |
| |
| bool IsSignExt = LeftOp.getOpcode() == ISD::SIGN_EXTEND; |
| bool IsZeroExt = LeftOp.getOpcode() == ISD::ZERO_EXTEND; |
| |
| if (!IsSignExt && !IsZeroExt) |
| return SDValue(); |
| |
| EVT NarrowVT = LeftOp.getOperand(0).getValueType(); |
| unsigned NarrowVTSize = NarrowVT.getScalarSizeInBits(); |
| |
| // Return true if U may use the lower bits of its operands. |
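| // e.g. with NarrowVTSize == 32: a user (srl %mul, 40) reads only the high |
| // half, so it does not use the lower bits, while (srl %mul, 16) does. |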
| auto UserOfLowerBits = [NarrowVTSize](SDNode *U) { |
| if (U->getOpcode() != ISD::SRL && U->getOpcode() != ISD::SRA) { |
| return true; |
| } |
| ConstantSDNode *UShiftAmtSrc = isConstOrConstSplat(U->getOperand(1)); |
| if (!UShiftAmtSrc) { |
| return true; |
| } |
| unsigned UShiftAmt = UShiftAmtSrc->getZExtValue(); |
| return UShiftAmt < NarrowVTSize; |
| }; |
| |
| // If the lower part of the MUL is also used and MUL_LOHI is supported, |
| // do not introduce the MULH in favor of MUL_LOHI. |
| unsigned MulLoHiOp = IsSignExt ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; |
| if (!ShiftOperand.hasOneUse() && |
| TLI.isOperationLegalOrCustom(MulLoHiOp, NarrowVT) && |
| llvm::any_of(ShiftOperand->users(), UserOfLowerBits)) { |
| return SDValue(); |
| } |
| |
| SDValue MulhRightOp; |
| if (ConstantSDNode *Constant = isConstOrConstSplat(RightOp)) { |
| unsigned ActiveBits = IsSignExt |
| ? Constant->getAPIntValue().getSignificantBits() |
| : Constant->getAPIntValue().getActiveBits(); |
| if (ActiveBits > NarrowVTSize) |
| return SDValue(); |
| MulhRightOp = DAG.getConstant( |
| Constant->getAPIntValue().trunc(NarrowVT.getScalarSizeInBits()), DL, |
| NarrowVT); |
| } else { |
| if (LeftOp.getOpcode() != RightOp.getOpcode()) |
| return SDValue(); |
| // Check that the two extend nodes are the same type. |
| if (NarrowVT != RightOp.getOperand(0).getValueType()) |
| return SDValue(); |
| MulhRightOp = RightOp.getOperand(0); |
| } |
| |
| EVT WideVT = LeftOp.getValueType(); |
| // Proceed with the transformation if the wide types match. |
| assert((WideVT == RightOp.getValueType()) && |
| "Cannot have a multiply node with two different operand types."); |
| |
| // Proceed with the transformation if the wide type is twice as large |
| // as the narrow type. |
| if (WideVT.getScalarSizeInBits() != 2 * NarrowVTSize) |
| return SDValue(); |
| |
| // Check the shift amount with the narrow type size. |
| // Proceed with the transformation if the shift amount is the width |
| // of the narrow type. |
| unsigned ShiftAmt = ShiftAmtSrc->getZExtValue(); |
| if (ShiftAmt != NarrowVTSize) |
| return SDValue(); |
| |
| // If the operation feeding into the MUL is a sign extend (sext), |
| // we use mulhs. Otherwise, zero extends (zext) use mulhu. |
| unsigned MulhOpcode = IsSignExt ? ISD::MULHS : ISD::MULHU; |
| |
| // Combine to mulh if mulh is legal/custom for the narrow type on the target |
| // or if it is a vector type then we could transform to an acceptable type and |
| // rely on legalization to split/combine the result. |
| if (NarrowVT.isVector()) { |
| EVT TransformVT = TLI.getTypeToTransformTo(*DAG.getContext(), NarrowVT); |
| if (TransformVT.getVectorElementType() != NarrowVT.getVectorElementType() || |
| !TLI.isOperationLegalOrCustom(MulhOpcode, TransformVT)) |
| return SDValue(); |
| } else { |
| if (!TLI.isOperationLegalOrCustom(MulhOpcode, NarrowVT)) |
| return SDValue(); |
| } |
| |
| SDValue Result = |
| DAG.getNode(MulhOpcode, DL, NarrowVT, LeftOp.getOperand(0), MulhRightOp); |
| bool IsSigned = N->getOpcode() == ISD::SRA; |
| return DAG.getExtOrTrunc(IsSigned, Result, DL, WideVT); |
| } |
| |
| // fold (bswap (logic_op(bswap(x),y))) -> logic_op(x,bswap(y)) |
| // This helper function accepts SDNodes with opcode ISD::BSWAP or |
| // ISD::BITREVERSE. |
| static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG) { |
| unsigned Opcode = N->getOpcode(); |
| if (Opcode != ISD::BSWAP && Opcode != ISD::BITREVERSE) |
| return SDValue(); |
| |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| SDValue X, Y; |
| |
| // If both operands are bswap/bitreverse, ignore the multiuse restriction. |
| if (sd_match(N0, m_OneUse(m_BitwiseLogic(m_UnaryOp(Opcode, m_Value(X)), |
| m_UnaryOp(Opcode, m_Value(Y)))))) |
| return DAG.getNode(N0.getOpcode(), DL, VT, X, Y); |
| |
| // Otherwise need to ensure logic_op and bswap/bitreverse(x) have one use. |
| if (sd_match(N0, m_OneUse(m_BitwiseLogic( |
| m_OneUse(m_UnaryOp(Opcode, m_Value(X))), m_Value(Y))))) { |
| SDValue NewBitReorder = DAG.getNode(Opcode, DL, VT, Y); |
| return DAG.getNode(N0.getOpcode(), DL, VT, X, NewBitReorder); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSRA(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| if (SDValue V = DAG.simplifyShift(N0, N1)) |
| return V; |
| |
| SDLoc DL(N); |
| EVT VT = N0.getValueType(); |
| unsigned OpSizeInBits = VT.getScalarSizeInBits(); |
| |
| // fold (sra c1, c2) -> c1 >>s c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::SRA, DL, VT, {N0, N1})) |
| return C; |
| |
| // Arithmetic shifting an all-sign-bit value is a no-op. |
| // fold (sra 0, x) -> 0 |
| // fold (sra -1, x) -> -1 |
| if (DAG.ComputeNumSignBits(N0) == OpSizeInBits) |
| return N0; |
| |
| // fold vector ops |
| if (VT.isVector()) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| |
| // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2)) |
| // clamp (add c1, c2) to max shift. |
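| // e.g. (sra (sra x, 3), 2) --> (sra x, 5); for i8, (sra (sra x, 5), 6) |
| // would sum past the bitwidth and is clamped to (sra x, 7). |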
| if (N0.getOpcode() == ISD::SRA) { |
| EVT ShiftVT = N1.getValueType(); |
| EVT ShiftSVT = ShiftVT.getScalarType(); |
| SmallVector<SDValue, 16> ShiftValues; |
| |
| auto SumOfShifts = [&](ConstantSDNode *LHS, ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); |
| APInt Sum = c1 + c2; |
| unsigned ShiftSum = |
| Sum.uge(OpSizeInBits) ? (OpSizeInBits - 1) : Sum.getZExtValue(); |
| ShiftValues.push_back(DAG.getConstant(ShiftSum, DL, ShiftSVT)); |
| return true; |
| }; |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), SumOfShifts)) { |
| SDValue ShiftValue; |
| if (N1.getOpcode() == ISD::BUILD_VECTOR) |
| ShiftValue = DAG.getBuildVector(ShiftVT, DL, ShiftValues); |
| else if (N1.getOpcode() == ISD::SPLAT_VECTOR) { |
| assert(ShiftValues.size() == 1 && |
| "Expected matchBinaryPredicate to return one element for " |
| "SPLAT_VECTORs"); |
| ShiftValue = DAG.getSplatVector(ShiftVT, DL, ShiftValues[0]); |
| } else |
| ShiftValue = ShiftValues[0]; |
| return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), ShiftValue); |
| } |
| } |
| |
| // fold (sra (shl X, m), (sub result_size, n)) |
| // -> (sign_extend (trunc (srl X, (sub (sub result_size, n), m)))) for |
| // result_size - n > m. |
| // If truncate is free for the target, sext(shl) is likely to result in |
| // better code. |
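| // e.g. for i32 with m == 8 and n == 16: (sra (shl X, 8), 16) |
| // --> (sext (trunc (srl X, 8) to i16)). |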
| if (N0.getOpcode() == ISD::SHL && N1C) { |
| // Get the two shift amounts: N01C == m and N1C == result_size - n. |
| const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1)); |
| if (N01C) { |
| LLVMContext &Ctx = *DAG.getContext(); |
| // Determine what the truncate's result bitsize and type would be. |
| EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue()); |
| |
| if (VT.isVector()) |
| TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorElementCount()); |
| |
| // Determine the residual right-shift amount. |
| int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue(); |
| |
| // If the shift is not a no-op (in which case this should be just a sign |
| // extend already), the truncate-to type is legal, sign_extend is legal |
| // on that type, and the truncate to that type is both legal and free, |
| // perform the transform. |
| if ((ShiftAmt > 0) && |
| TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) && |
| TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) && |
| TLI.isTruncateFree(VT, TruncVT)) { |
| SDValue Amt = DAG.getShiftAmountConstant(ShiftAmt, VT, DL); |
| SDValue Shift = DAG.getNode(ISD::SRL, DL, VT, |
| N0.getOperand(0), Amt); |
| SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, |
| Shift); |
| return DAG.getNode(ISD::SIGN_EXTEND, DL, |
| N->getValueType(0), Trunc); |
| } |
| } |
| } |
| |
| // We convert trunc/ext to opposing shifts in IR, but casts may be cheaper. |
| // sra (add (shl X, N1C), AddC), N1C --> |
| // sext (add (trunc X to (width - N1C)), AddC') |
| // sra (sub AddC, (shl X, N1C)), N1C --> |
| // sext (sub AddC1',(trunc X to (width - N1C))) |
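| // e.g. for i32 with N1C == 16 and AddC == 0xA0000: |
| // (sra (add (shl X, 16), 0xA0000), 16) |
| // --> (sext (add (trunc X to i16), 0xA)). |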
| if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB) && N1C && |
| N0.hasOneUse()) { |
| bool IsAdd = N0.getOpcode() == ISD::ADD; |
| SDValue Shl = N0.getOperand(IsAdd ? 0 : 1); |
| if (Shl.getOpcode() == ISD::SHL && Shl.getOperand(1) == N1 && |
| Shl.hasOneUse()) { |
| // TODO: AddC does not need to be a splat. |
| if (ConstantSDNode *AddC = |
| isConstOrConstSplat(N0.getOperand(IsAdd ? 1 : 0))) { |
| // Determine what the truncate's type would be and ask the target if |
| // that is a free operation. |
| LLVMContext &Ctx = *DAG.getContext(); |
| unsigned ShiftAmt = N1C->getZExtValue(); |
| EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - ShiftAmt); |
| if (VT.isVector()) |
| TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorElementCount()); |
| |
| // TODO: The simple type check probably belongs in the default hook |
| // implementation and/or target-specific overrides (because |
| // non-simple types likely require masking when legalized), but |
| // that restriction may conflict with other transforms. |
| if (TruncVT.isSimple() && isTypeLegal(TruncVT) && |
| TLI.isTruncateFree(VT, TruncVT)) { |
| SDValue Trunc = DAG.getZExtOrTrunc(Shl.getOperand(0), DL, TruncVT); |
| SDValue ShiftC = |
| DAG.getConstant(AddC->getAPIntValue().lshr(ShiftAmt).trunc( |
| TruncVT.getScalarSizeInBits()), |
| DL, TruncVT); |
| SDValue Add; |
| if (IsAdd) |
| Add = DAG.getNode(ISD::ADD, DL, TruncVT, Trunc, ShiftC); |
| else |
| Add = DAG.getNode(ISD::SUB, DL, TruncVT, ShiftC, Trunc); |
| return DAG.getSExtOrTrunc(Add, DL, VT); |
| } |
| } |
| } |
| } |
| |
| // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))). |
| if (N1.getOpcode() == ISD::TRUNCATE && |
| N1.getOperand(0).getOpcode() == ISD::AND) { |
| if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) |
| return DAG.getNode(ISD::SRA, DL, VT, N0, NewOp1); |
| } |
| |
| // fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2)) |
| // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2)) |
| // if c1 is equal to the number of bits the trunc removes |
| // TODO - support non-uniform vector shift amounts. |
| if (N0.getOpcode() == ISD::TRUNCATE && |
| (N0.getOperand(0).getOpcode() == ISD::SRL || |
| N0.getOperand(0).getOpcode() == ISD::SRA) && |
| N0.getOperand(0).hasOneUse() && |
| N0.getOperand(0).getOperand(1).hasOneUse() && N1C) { |
| SDValue N0Op0 = N0.getOperand(0); |
| if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) { |
| EVT LargeVT = N0Op0.getValueType(); |
| unsigned TruncBits = LargeVT.getScalarSizeInBits() - OpSizeInBits; |
| if (LargeShift->getAPIntValue() == TruncBits) { |
| EVT LargeShiftVT = getShiftAmountTy(LargeVT); |
| SDValue Amt = DAG.getZExtOrTrunc(N1, DL, LargeShiftVT); |
| Amt = DAG.getNode(ISD::ADD, DL, LargeShiftVT, Amt, |
| DAG.getConstant(TruncBits, DL, LargeShiftVT)); |
| SDValue SRA = |
| DAG.getNode(ISD::SRA, DL, LargeVT, N0Op0.getOperand(0), Amt); |
| return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA); |
| } |
| } |
| } |
| |
| // Simplify, based on bits shifted out of the LHS. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| // If the sign bit is known to be zero, switch this to a SRL. |
| if (DAG.SignBitIsZero(N0)) |
| return DAG.getNode(ISD::SRL, DL, VT, N0, N1); |
| |
| if (N1C && !N1C->isOpaque()) |
| if (SDValue NewSRA = visitShiftByConstant(N)) |
| return NewSRA; |
| |
| // Try to transform this shift into a multiply-high if |
| // it matches the appropriate pattern detected in combineShiftToMULH. |
| if (SDValue MULH = combineShiftToMULH(N, DL, DAG, TLI)) |
| return MULH; |
| |
| // Attempt to convert a sra of a load into a narrower sign-extending load. |
| if (SDValue NarrowLoad = reduceLoadWidth(N)) |
| return NarrowLoad; |
| |
| if (SDValue AVG = foldShiftToAvg(N)) |
| return AVG; |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSRL(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| if (SDValue V = DAG.simplifyShift(N0, N1)) |
| return V; |
| |
| SDLoc DL(N); |
| EVT VT = N0.getValueType(); |
| EVT ShiftVT = N1.getValueType(); |
| unsigned OpSizeInBits = VT.getScalarSizeInBits(); |
| |
| // fold (srl c1, c2) -> c1 >>u c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::SRL, DL, VT, {N0, N1})) |
| return C; |
| |
| // fold vector ops |
| if (VT.isVector()) |
| if (SDValue FoldedVOp = SimplifyVBinOp(N, DL)) |
| return FoldedVOp; |
| |
| if (SDValue NewSel = foldBinOpIntoSelect(N)) |
| return NewSel; |
| |
| // if (srl x, c) is known to be zero, return 0 |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| if (N1C && |
| DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnes(OpSizeInBits))) |
| return DAG.getConstant(0, DL, VT); |
| |
| // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2)) |
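| // e.g. for i8: (srl (srl x, 3), 2) --> (srl x, 5), while |
| // (srl (srl x, 5), 6) --> 0 because the amounts sum past the bitwidth. |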
| if (N0.getOpcode() == ISD::SRL) { |
| auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); |
| return (c1 + c2).uge(OpSizeInBits); |
| }; |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange)) |
| return DAG.getConstant(0, DL, VT); |
| |
| auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| APInt c1 = LHS->getAPIntValue(); |
| APInt c2 = RHS->getAPIntValue(); |
| zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); |
| return (c1 + c2).ult(OpSizeInBits); |
| }; |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) { |
| SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1)); |
| return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Sum); |
| } |
| } |
| |
| if (N1C && N0.getOpcode() == ISD::TRUNCATE && |
| N0.getOperand(0).getOpcode() == ISD::SRL) { |
| SDValue InnerShift = N0.getOperand(0); |
| // TODO - support non-uniform vector shift amounts. |
| if (auto *N001C = isConstOrConstSplat(InnerShift.getOperand(1))) { |
| uint64_t c1 = N001C->getZExtValue(); |
| uint64_t c2 = N1C->getZExtValue(); |
| EVT InnerShiftVT = InnerShift.getValueType(); |
| EVT ShiftAmtVT = InnerShift.getOperand(1).getValueType(); |
| uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits(); |
| // srl (trunc (srl x, c1)), c2 --> 0 or (trunc (srl x, (add c1, c2))) |
| // This is only valid if the OpSizeInBits + c1 = size of inner shift. |
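| // e.g. srl (trunc (srl i64:x, 32) to i32), 8 |
| // --> trunc (srl i64:x, 40) to i32, since 32 + 32 == 64. |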
| if (c1 + OpSizeInBits == InnerShiftSize) { |
| if (c1 + c2 >= InnerShiftSize) |
| return DAG.getConstant(0, DL, VT); |
| SDValue NewShiftAmt = DAG.getConstant(c1 + c2, DL, ShiftAmtVT); |
| SDValue NewShift = DAG.getNode(ISD::SRL, DL, InnerShiftVT, |
| InnerShift.getOperand(0), NewShiftAmt); |
| return DAG.getNode(ISD::TRUNCATE, DL, VT, NewShift); |
| } |
| // In the more general case, we can clear the high bits after the shift: |
| // srl (trunc (srl x, c1)), c2 --> trunc (and (srl x, (c1+c2)), Mask) |
| if (N0.hasOneUse() && InnerShift.hasOneUse() && |
| c1 + c2 < InnerShiftSize) { |
| SDValue NewShiftAmt = DAG.getConstant(c1 + c2, DL, ShiftAmtVT); |
| SDValue NewShift = DAG.getNode(ISD::SRL, DL, InnerShiftVT, |
| InnerShift.getOperand(0), NewShiftAmt); |
| SDValue Mask = DAG.getConstant(APInt::getLowBitsSet(InnerShiftSize, |
| OpSizeInBits - c2), |
| DL, InnerShiftVT); |
| SDValue And = DAG.getNode(ISD::AND, DL, InnerShiftVT, NewShift, Mask); |
| return DAG.getNode(ISD::TRUNCATE, DL, VT, And); |
| } |
| } |
| } |
| |
| // fold (srl (shl x, c1), c2) -> (and (shl x, (sub c1, c2)), MASK) or |
| // (and (srl x, (sub c2, c1)), MASK) |
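| // e.g. for i8: (srl (shl x, 3), 2) --> (and (shl x, 1), 0x3E). |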
| if (N0.getOpcode() == ISD::SHL && |
| (N0.getOperand(1) == N1 || N0->hasOneUse()) && |
| TLI.shouldFoldConstantShiftPairToMask(N, Level)) { |
| auto MatchShiftAmount = [OpSizeInBits](ConstantSDNode *LHS, |
| ConstantSDNode *RHS) { |
| const APInt &LHSC = LHS->getAPIntValue(); |
| const APInt &RHSC = RHS->getAPIntValue(); |
| return LHSC.ult(OpSizeInBits) && RHSC.ult(OpSizeInBits) && |
| LHSC.getZExtValue() <= RHSC.getZExtValue(); |
| }; |
| if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchShiftAmount, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| SDValue N01 = DAG.getZExtOrTrunc(N0.getOperand(1), DL, ShiftVT); |
| SDValue Diff = DAG.getNode(ISD::SUB, DL, ShiftVT, N01, N1); |
| SDValue Mask = DAG.getAllOnesConstant(DL, VT); |
| Mask = DAG.getNode(ISD::SRL, DL, VT, Mask, N01); |
| Mask = DAG.getNode(ISD::SHL, DL, VT, Mask, Diff); |
| SDValue Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), Diff); |
| return DAG.getNode(ISD::AND, DL, VT, Shift, Mask); |
| } |
| if (ISD::matchBinaryPredicate(N0.getOperand(1), N1, MatchShiftAmount, |
| /*AllowUndefs*/ false, |
| /*AllowTypeMismatch*/ true)) { |
| SDValue N01 = DAG.getZExtOrTrunc(N0.getOperand(1), DL, ShiftVT); |
| SDValue Diff = DAG.getNode(ISD::SUB, DL, ShiftVT, N1, N01); |
| SDValue Mask = DAG.getAllOnesConstant(DL, VT); |
| Mask = DAG.getNode(ISD::SRL, DL, VT, Mask, N1); |
| SDValue Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Diff); |
| return DAG.getNode(ISD::AND, DL, VT, Shift, Mask); |
| } |
| } |
| |
| // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask) |
| // TODO - support non-uniform vector shift amounts. |
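| // e.g. (srl (any_extend i8:x to i32), 3) |
| // --> (and (any_extend (srl x, 3)), 0x1FFFFFFF). |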
| if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { |
| // Shifting in all undef bits? |
| EVT SmallVT = N0.getOperand(0).getValueType(); |
| unsigned BitSize = SmallVT.getScalarSizeInBits(); |
| if (N1C->getAPIntValue().uge(BitSize)) |
| return DAG.getUNDEF(VT); |
| |
| if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) { |
| uint64_t ShiftAmt = N1C->getZExtValue(); |
| SDLoc DL0(N0); |
| SDValue SmallShift = |
| DAG.getNode(ISD::SRL, DL0, SmallVT, N0.getOperand(0), |
| DAG.getShiftAmountConstant(ShiftAmt, SmallVT, DL0)); |
| AddToWorklist(SmallShift.getNode()); |
| APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt); |
| return DAG.getNode(ISD::AND, DL, VT, |
| DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift), |
| DAG.getConstant(Mask, DL, VT)); |
| } |
| } |
| |
| // fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign |
| // bit, which is unmodified by sra. |
| if (N1C && N1C->getAPIntValue() == (OpSizeInBits - 1)) { |
| if (N0.getOpcode() == ISD::SRA) |
| return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1); |
| } |
| |
| // fold (srl (ctlz x), "5") -> (xor x, 1) iff x may have only the low bit |
| // set and the bitwidth is a power of two. The "5" represents |
| // (log2 (bitwidth x)). |
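| // e.g. for i32, if x is known to be 0 or 1: ctlz(1) == 31 and |
| // ctlz(0) == 32, so (srl (ctlz x), 5) yields (xor x, 1). |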
| if (N1C && N0.getOpcode() == ISD::CTLZ && |
| isPowerOf2_32(OpSizeInBits) && |
| N1C->getAPIntValue() == Log2_32(OpSizeInBits)) { |
| KnownBits Known = DAG.computeKnownBits(N0.getOperand(0)); |
| |
| // If any of the input bits are KnownOne, then the input couldn't be all |
| // zeros, thus the result of the srl will always be zero. |
| if (Known.One.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT); |
| |
| // If all of the bits input to the ctlz node are known to be zero, then |
| // the result of the ctlz is the bitwidth and the result of the shift is |
| // one. |
| APInt UnknownBits = ~Known.Zero; |
| if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT); |
| |
| // Otherwise, check to see if there is exactly one bit input to the ctlz. |
| if (UnknownBits.isPowerOf2()) { |
| // Okay, we know that only the single bit specified by UnknownBits |
| // could be set on input to the CTLZ node. If this bit is set, the SRL |
| // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair |
| // to an SRL/XOR pair, which is likely to simplify more. |
| unsigned ShAmt = UnknownBits.countr_zero(); |
| SDValue Op = N0.getOperand(0); |
| |
| if (ShAmt) { |
| SDLoc DL(N0); |
| Op = DAG.getNode(ISD::SRL, DL, VT, Op, |
| DAG.getShiftAmountConstant(ShAmt, VT, DL)); |
| AddToWorklist(Op.getNode()); |
| } |
| return DAG.getNode(ISD::XOR, DL, VT, Op, DAG.getConstant(1, DL, VT)); |
| } |
| } |
| |
| // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))). |
| if (N1.getOpcode() == ISD::TRUNCATE && |
| N1.getOperand(0).getOpcode() == ISD::AND) { |
| if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) |
| return DAG.getNode(ISD::SRL, DL, VT, N0, NewOp1); |
| } |
| |
| // fold (srl (logic_op x, (shl (zext y), c1)), c1) |
| // -> (logic_op (srl x, c1), (zext y)) |
| // c1 <= leadingzeros(zext(y)) |
| SDValue X, ZExtY; |
| if (N1C && sd_match(N0, m_OneUse(m_BitwiseLogic( |
| m_Value(X), |
| m_OneUse(m_Shl(m_AllOf(m_Value(ZExtY), |
| m_Opc(ISD::ZERO_EXTEND)), |
| m_Specific(N1))))))) { |
| unsigned NumLeadingZeros = ZExtY.getScalarValueSizeInBits() - |
| ZExtY.getOperand(0).getScalarValueSizeInBits(); |
| if (N1C->getZExtValue() <= NumLeadingZeros) |
| return DAG.getNode(N0.getOpcode(), SDLoc(N0), VT, |
| DAG.getNode(ISD::SRL, SDLoc(N0), VT, X, N1), ZExtY); |
| } |
| |
| // fold operands of srl based on knowledge that the low bits are not |
| // demanded. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| if (N1C && !N1C->isOpaque()) |
| if (SDValue NewSRL = visitShiftByConstant(N)) |
| return NewSRL; |
| |
| // Attempt to convert a srl of a load into a narrower zero-extending load. |
| if (SDValue NarrowLoad = reduceLoadWidth(N)) |
| return NarrowLoad; |
| |
| // Here is a common situation. We want to optimize: |
| // |
| // %a = ... |
| // %b = and i32 %a, 2 |
| // %c = srl i32 %b, 1 |
| // brcond i32 %c ... |
| // |
| // into |
| // |
| // %a = ... |
| // %b = and %a, 2 |
| // %c = setcc eq %b, 0 |
| // brcond %c ... |
| // |
| // However, after the source operand of the SRL is optimized into an AND, |
| // the SRL itself may not be optimized further. Look for it and add the |
| // BRCOND into the worklist. |
| // |
| // This also tends to happen for binary operations when |
| // SimplifyDemandedBits is involved. |
| // |
| // FIXME: This is unnecessary if we process the DAG in topological order, |
| // which we plan to do. This workaround can be removed once the DAG is |
| // processed in topological order. |
| if (N->hasOneUse()) { |
| SDNode *User = *N->user_begin(); |
| |
| // Look past the truncate. |
| if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) |
| User = *User->user_begin(); |
| |
| if (User->getOpcode() == ISD::BRCOND || User->getOpcode() == ISD::AND || |
| User->getOpcode() == ISD::OR || User->getOpcode() == ISD::XOR) |
| AddToWorklist(User); |
| } |
| |
| // Try to transform this shift into a multiply-high if |
| // it matches the appropriate pattern detected in combineShiftToMULH. |
| if (SDValue MULH = combineShiftToMULH(N, DL, DAG, TLI)) |
| return MULH; |
| |
| if (SDValue AVG = foldShiftToAvg(N)) |
| return AVG; |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitFunnelShift(SDNode *N) { |
| EVT VT = N->getValueType(0); |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue N2 = N->getOperand(2); |
| bool IsFSHL = N->getOpcode() == ISD::FSHL; |
| unsigned BitWidth = VT.getScalarSizeInBits(); |
| SDLoc DL(N); |
| |
| // fold (fshl N0, N1, 0) -> N0 |
| // fold (fshr N0, N1, 0) -> N1 |
| if (isPowerOf2_32(BitWidth)) |
| if (DAG.MaskedValueIsZero( |
| N2, APInt(N2.getScalarValueSizeInBits(), BitWidth - 1))) |
| return IsFSHL ? N0 : N1; |
| |
| auto IsUndefOrZero = [](SDValue V) { |
| return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true); |
| }; |
| |
| // TODO - support non-uniform vector shift amounts. |
| if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) { |
| EVT ShAmtTy = N2.getValueType(); |
| |
| // fold (fsh* N0, N1, c) -> (fsh* N0, N1, c % BitWidth) |
| if (Cst->getAPIntValue().uge(BitWidth)) { |
| uint64_t RotAmt = Cst->getAPIntValue().urem(BitWidth); |
| return DAG.getNode(N->getOpcode(), DL, VT, N0, N1, |
| DAG.getConstant(RotAmt, DL, ShAmtTy)); |
| } |
| |
| unsigned ShAmt = Cst->getZExtValue(); |
| if (ShAmt == 0) |
| return IsFSHL ? N0 : N1; |
| |
| // fold fshl(undef_or_zero, N1, C) -> lshr(N1, BW-C) |
| // fold fshr(undef_or_zero, N1, C) -> lshr(N1, C) |
| // fold fshl(N0, undef_or_zero, C) -> shl(N0, C) |
| // fold fshr(N0, undef_or_zero, C) -> shl(N0, BW-C) |
| if (IsUndefOrZero(N0)) |
| return DAG.getNode( |
| ISD::SRL, DL, VT, N1, |
| DAG.getConstant(IsFSHL ? BitWidth - ShAmt : ShAmt, DL, ShAmtTy)); |
| if (IsUndefOrZero(N1)) |
| return DAG.getNode( |
| ISD::SHL, DL, VT, N0, |
| DAG.getConstant(IsFSHL ? ShAmt : BitWidth - ShAmt, DL, ShAmtTy)); |
| |
| // fold (fshl ld1, ld0, c) -> (ld0[ofs]) iff ld0 and ld1 are consecutive. |
| // fold (fshr ld1, ld0, c) -> (ld0[ofs]) iff ld0 and ld1 are consecutive. |
| // TODO - bigendian support once we have test coverage. |
| // TODO - can we merge this with CombineConsecutiveLoads/MatchLoadCombine? |
| // TODO - permit LHS EXTLOAD if extensions are shifted out. |
| if ((BitWidth % 8) == 0 && (ShAmt % 8) == 0 && !VT.isVector() && |
| !DAG.getDataLayout().isBigEndian()) { |
| auto *LHS = dyn_cast<LoadSDNode>(N0); |
| auto *RHS = dyn_cast<LoadSDNode>(N1); |
| if (LHS && RHS && LHS->isSimple() && RHS->isSimple() && |
| LHS->getAddressSpace() == RHS->getAddressSpace() && |
| (LHS->hasNUsesOfValue(1, 0) || RHS->hasNUsesOfValue(1, 0)) && |
| ISD::isNON_EXTLoad(RHS) && ISD::isNON_EXTLoad(LHS)) { |
| if (DAG.areNonVolatileConsecutiveLoads(LHS, RHS, BitWidth / 8, 1)) { |
| SDLoc DL(RHS); |
| uint64_t PtrOff = |
| IsFSHL ? (((BitWidth - ShAmt) % BitWidth) / 8) : (ShAmt / 8); |
| Align NewAlign = commonAlignment(RHS->getAlign(), PtrOff); |
| unsigned Fast = 0; |
| if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, |
| RHS->getAddressSpace(), NewAlign, |
| RHS->getMemOperand()->getFlags(), &Fast) && |
| Fast) { |
| SDValue NewPtr = DAG.getMemBasePlusOffset( |
| RHS->getBasePtr(), TypeSize::getFixed(PtrOff), DL); |
| AddToWorklist(NewPtr.getNode()); |
| SDValue Load = DAG.getLoad( |
| VT, DL, RHS->getChain(), NewPtr, |
| RHS->getPointerInfo().getWithOffset(PtrOff), NewAlign, |
| RHS->getMemOperand()->getFlags(), RHS->getAAInfo()); |
| DAG.makeEquivalentMemoryOrdering(LHS, Load.getValue(1)); |
| DAG.makeEquivalentMemoryOrdering(RHS, Load.getValue(1)); |
| return Load; |
| } |
| } |
| } |
| } |
| } |
| |
| // fold fshr(undef_or_zero, N1, N2) -> lshr(N1, N2) |
| // fold fshl(N0, undef_or_zero, N2) -> shl(N0, N2) |
| // iff we know the shift amount is in range. |
| // TODO: when is it worth doing SUB(BW, N2) as well? |
| if (isPowerOf2_32(BitWidth)) { |
| APInt ModuloBits(N2.getScalarValueSizeInBits(), BitWidth - 1); |
| if (IsUndefOrZero(N0) && !IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits)) |
| return DAG.getNode(ISD::SRL, DL, VT, N1, N2); |
| if (IsUndefOrZero(N1) && IsFSHL && DAG.MaskedValueIsZero(N2, ~ModuloBits)) |
| return DAG.getNode(ISD::SHL, DL, VT, N0, N2); |
| } |
| |
| // fold (fshl N0, N0, N2) -> (rotl N0, N2) |
| // fold (fshr N0, N0, N2) -> (rotr N0, N2) |
| // TODO: Investigate flipping this rotate if only one is legal. |
| // If funnel shift is legal as well we might be better off avoiding |
| // non-constant (BW - N2). |
| unsigned RotOpc = IsFSHL ? ISD::ROTL : ISD::ROTR; |
| if (N0 == N1 && hasOperation(RotOpc, VT)) |
| return DAG.getNode(RotOpc, DL, VT, N0, N2); |
| |
| // Simplify, based on bits shifted out of N0/N1. |
| if (SimplifyDemandedBits(SDValue(N, 0))) |
| return SDValue(N, 0); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSHLSAT(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| if (SDValue V = DAG.simplifyShift(N0, N1)) |
| return V; |
| |
| SDLoc DL(N); |
| EVT VT = N0.getValueType(); |
| |
| // fold (*shlsat c1, c2) -> c1<<c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(N->getOpcode(), DL, VT, {N0, N1})) |
| return C; |
| |
| ConstantSDNode *N1C = isConstOrConstSplat(N1); |
| |
| if (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::SHL, VT)) { |
| // fold (sshlsat x, c) -> (shl x, c) |
| if (N->getOpcode() == ISD::SSHLSAT && N1C && |
| N1C->getAPIntValue().ult(DAG.ComputeNumSignBits(N0))) |
| return DAG.getNode(ISD::SHL, DL, VT, N0, N1); |
| |
| // fold (ushlsat x, c) -> (shl x, c) |
| if (N->getOpcode() == ISD::USHLSAT && N1C && |
| N1C->getAPIntValue().ule( |
| DAG.computeKnownBits(N0).countMinLeadingZeros())) |
| return DAG.getNode(ISD::SHL, DL, VT, N0, N1); |
| } |
| |
| return SDValue(); |
| } |
| |
| // Given an ABS node, detect the following patterns: |
| // (ABS (SUB (EXTEND a), (EXTEND b))). |
| // (TRUNC (ABS (SUB (EXTEND a), (EXTEND b)))). |
| // Generates UABD/SABD instruction. |
| SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) { |
| EVT SrcVT = N->getValueType(0); |
| |
| if (N->getOpcode() == ISD::TRUNCATE) |
| N = N->getOperand(0).getNode(); |
| |
| EVT VT = N->getValueType(0); |
| SDValue Op0, Op1; |
| |
| if (!sd_match(N, m_Abs(m_Sub(m_Value(Op0), m_Value(Op1))))) |
| return SDValue(); |
| |
| SDValue AbsOp0 = N->getOperand(0); |
| unsigned Opc0 = Op0.getOpcode(); |
| |
| // Check if the operands of the sub are (zero|sign)-extended. |
| // TODO: Should we use ValueTracking instead? |
| if (Opc0 != Op1.getOpcode() || |
| (Opc0 != ISD::ZERO_EXTEND && Opc0 != ISD::SIGN_EXTEND && |
| Opc0 != ISD::SIGN_EXTEND_INREG)) { |
| // fold (abs (sub nsw x, y)) -> abds(x, y) |
| // Don't fold this for unsupported types as we lose the NSW handling. |
| if (AbsOp0->getFlags().hasNoSignedWrap() && hasOperation(ISD::ABDS, VT) && |
| TLI.preferABDSToABSWithNSW(VT)) { |
| SDValue ABD = DAG.getNode(ISD::ABDS, DL, VT, Op0, Op1); |
| return DAG.getZExtOrTrunc(ABD, DL, SrcVT); |
| } |
| return SDValue(); |
| } |
| |
| EVT VT0, VT1; |
| if (Opc0 == ISD::SIGN_EXTEND_INREG) { |
| VT0 = cast<VTSDNode>(Op0.getOperand(1))->getVT(); |
| VT1 = cast<VTSDNode>(Op1.getOperand(1))->getVT(); |
| } else { |
| VT0 = Op0.getOperand(0).getValueType(); |
| VT1 = Op1.getOperand(0).getValueType(); |
| } |
| unsigned ABDOpcode = (Opc0 == ISD::ZERO_EXTEND) ? ISD::ABDU : ISD::ABDS; |
| |
| // fold abs(sext(x) - sext(y)) -> zext(abds(x, y)) |
| // fold abs(zext(x) - zext(y)) -> zext(abdu(x, y)) |
| EVT MaxVT = VT0.bitsGT(VT1) ? VT0 : VT1; |
| if ((VT0 == MaxVT || Op0->hasOneUse()) && |
| (VT1 == MaxVT || Op1->hasOneUse()) && |
| (!LegalTypes || hasOperation(ABDOpcode, MaxVT))) { |
| SDValue ABD = DAG.getNode(ABDOpcode, DL, MaxVT, |
| DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op0), |
| DAG.getNode(ISD::TRUNCATE, DL, MaxVT, Op1)); |
| ABD = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, ABD); |
| return DAG.getZExtOrTrunc(ABD, DL, SrcVT); |
| } |
| |
| // fold abs(sext(x) - sext(y)) -> abds(sext(x), sext(y)) |
| // fold abs(zext(x) - zext(y)) -> abdu(zext(x), zext(y)) |
| if (!LegalOperations || hasOperation(ABDOpcode, VT)) { |
| SDValue ABD = DAG.getNode(ABDOpcode, DL, VT, Op0, Op1); |
| return DAG.getZExtOrTrunc(ABD, DL, SrcVT); |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitABS(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (abs c1) -> c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::ABS, DL, VT, {N0})) |
| return C; |
| // fold (abs (abs x)) -> (abs x) |
| if (N0.getOpcode() == ISD::ABS) |
| return N0; |
| // fold (abs x) -> x iff not-negative |
| if (DAG.SignBitIsZero(N0)) |
| return N0; |
| |
| if (SDValue ABD = foldABSToABD(N, DL)) |
| return ABD; |
| |
| // fold (abs (sign_extend_inreg x)) -> (zero_extend (abs (truncate x))) |
| // iff zero_extend/truncate are free. |
| if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG) { |
| EVT ExtVT = cast<VTSDNode>(N0.getOperand(1))->getVT(); |
| if (TLI.isTruncateFree(VT, ExtVT) && TLI.isZExtFree(ExtVT, VT) && |
| TLI.isTypeDesirableForOp(ISD::ABS, ExtVT) && |
| hasOperation(ISD::ABS, ExtVT)) { |
| return DAG.getNode( |
| ISD::ZERO_EXTEND, DL, VT, |
| DAG.getNode(ISD::ABS, DL, ExtVT, |
| DAG.getNode(ISD::TRUNCATE, DL, ExtVT, N0.getOperand(0)))); |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitBSWAP(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (bswap c1) -> c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::BSWAP, DL, VT, {N0})) |
| return C; |
| // fold (bswap (bswap x)) -> x |
| if (N0.getOpcode() == ISD::BSWAP) |
| return N0.getOperand(0); |
| |
| // Canonicalize bswap(bitreverse(x)) -> bitreverse(bswap(x)). If bitreverse |
| // isn't supported, it will be expanded to bswap followed by a manual reversal |
| // of bits in each byte. By placing bswaps before bitreverse, we can remove |
| // the two bswaps if the bitreverse gets expanded. |
| if (N0.getOpcode() == ISD::BITREVERSE && N0.hasOneUse()) { |
| SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, N0.getOperand(0)); |
| return DAG.getNode(ISD::BITREVERSE, DL, VT, BSwap); |
| } |
| |
| // fold (bswap shl(x,c)) -> (zext(bswap(trunc(shl(x,sub(c,bw/2)))))) |
| // iff c >= bw/2 (i.e. lower half is known zero) |
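| // e.g. for i64 with c == 48: HalfVT == i32 and the residual shift amount |
| // is 48 - 32 == 16, giving |
| // (zext (bswap (trunc (shl x, 16) to i32)) to i64). |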
| unsigned BW = VT.getScalarSizeInBits(); |
| if (BW >= 32 && N0.getOpcode() == ISD::SHL && N0.hasOneUse()) { |
| auto *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), BW / 2); |
| if (ShAmt && ShAmt->getAPIntValue().ult(BW) && |
| ShAmt->getZExtValue() >= (BW / 2) && |
| (ShAmt->getZExtValue() % 16) == 0 && TLI.isTypeLegal(HalfVT) && |
| TLI.isTruncateFree(VT, HalfVT) && |
| (!LegalOperations || hasOperation(ISD::BSWAP, HalfVT))) { |
| SDValue Res = N0.getOperand(0); |
| if (uint64_t NewShAmt = (ShAmt->getZExtValue() - (BW / 2))) |
| Res = DAG.getNode(ISD::SHL, DL, VT, Res, |
| DAG.getShiftAmountConstant(NewShAmt, VT, DL)); |
| Res = DAG.getZExtOrTrunc(Res, DL, HalfVT); |
| Res = DAG.getNode(ISD::BSWAP, DL, HalfVT, Res); |
| return DAG.getZExtOrTrunc(Res, DL, VT); |
| } |
| } |
| |
| // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as |
| // inverse-shift-of-bswap: |
| // bswap (X u<< C) --> (bswap X) u>> C |
| // bswap (X u>> C) --> (bswap X) u<< C |
| if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) && |
| N0.hasOneUse()) { |
| auto *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1)); |
| if (ShAmt && ShAmt->getAPIntValue().ult(BW) && |
| ShAmt->getZExtValue() % 8 == 0) { |
| SDValue NewSwap = DAG.getNode(ISD::BSWAP, DL, VT, N0.getOperand(0)); |
| unsigned InverseShift = N0.getOpcode() == ISD::SHL ? ISD::SRL : ISD::SHL; |
| return DAG.getNode(InverseShift, DL, VT, NewSwap, N0.getOperand(1)); |
| } |
| } |
| |
| if (SDValue V = foldBitOrderCrossLogicOp(N, DAG)) |
| return V; |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitBITREVERSE(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (bitreverse c1) -> c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::BITREVERSE, DL, VT, {N0})) |
| return C; |
| |
| // fold (bitreverse (bitreverse x)) -> x |
| if (N0.getOpcode() == ISD::BITREVERSE) |
| return N0.getOperand(0); |
| |
| SDValue X, Y; |
| |
| // fold (bitreverse (lshr (bitreverse x), y)) -> (shl x, y) |
| if ((!LegalOperations || TLI.isOperationLegal(ISD::SHL, VT)) && |
| sd_match(N, m_BitReverse(m_Srl(m_BitReverse(m_Value(X)), m_Value(Y))))) |
| return DAG.getNode(ISD::SHL, DL, VT, X, Y); |
| |
| // fold (bitreverse (shl (bitreverse x), y)) -> (lshr x, y) |
| if ((!LegalOperations || TLI.isOperationLegal(ISD::SRL, VT)) && |
| sd_match(N, m_BitReverse(m_Shl(m_BitReverse(m_Value(X)), m_Value(Y))))) |
| return DAG.getNode(ISD::SRL, DL, VT, X, Y); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitCTLZ(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (ctlz c1) -> c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::CTLZ, DL, VT, {N0})) |
| return C; |
| |
| // If the value is known never to be zero, switch to the undef version. |
| if (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ_ZERO_UNDEF, VT)) |
| if (DAG.isKnownNeverZero(N0)) |
| return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, DL, VT, N0); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (ctlz_zero_undef c1) -> c2 |
| if (SDValue C = |
| DAG.FoldConstantArithmetic(ISD::CTLZ_ZERO_UNDEF, DL, VT, {N0})) |
| return C; |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitCTTZ(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (cttz c1) -> c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::CTTZ, DL, VT, {N0})) |
| return C; |
| |
| // If the value is known never to be zero, switch to the undef version. |
| if (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ_ZERO_UNDEF, VT)) |
| if (DAG.isKnownNeverZero(N0)) |
| return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, DL, VT, N0); |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| SDLoc DL(N); |
| |
| // fold (cttz_zero_undef c1) -> c2 |
| if (SDValue C = |
| DAG.FoldConstantArithmetic(ISD::CTTZ_ZERO_UNDEF, DL, VT, {N0})) |
| return C; |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitCTPOP(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| EVT VT = N->getValueType(0); |
| unsigned NumBits = VT.getScalarSizeInBits(); |
| SDLoc DL(N); |
| |
| // fold (ctpop c1) -> c2 |
| if (SDValue C = DAG.FoldConstantArithmetic(ISD::CTPOP, DL, VT, {N0})) |
| return C; |
| |
| // If the source is being shifted, but doesn't affect any active bits, |
| // then we can call CTPOP on the shift source directly. |
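| // e.g. if the low 8 bits of x are known to be zero, (ctpop (srl x, 8)) |
| // counts exactly the same set bits as (ctpop x). |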
| if (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SHL) { |
| if (ConstantSDNode *AmtC = isConstOrConstSplat(N0.getOperand(1))) { |
| const APInt &Amt = AmtC->getAPIntValue(); |
| if (Amt.ult(NumBits)) { |
| KnownBits KnownSrc = DAG.computeKnownBits(N0.getOperand(0)); |
| if ((N0.getOpcode() == ISD::SRL && |
| Amt.ule(KnownSrc.countMinTrailingZeros())) || |
| (N0.getOpcode() == ISD::SHL && |
| Amt.ule(KnownSrc.countMinLeadingZeros()))) { |
| return DAG.getNode(ISD::CTPOP, DL, VT, N0.getOperand(0)); |
| } |
| } |
| } |
| } |
| |
| // If the upper bits are known to be zero, then see if it's profitable to |
| // only count the lower bits. |
| if (VT.isScalarInteger() && NumBits > 8 && (NumBits & 1) == 0) { |
| EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), NumBits / 2); |
| if (hasOperation(ISD::CTPOP, HalfVT) && |
| TLI.isTypeDesirableForOp(ISD::CTPOP, HalfVT) && |
| TLI.isTruncateFree(N0, HalfVT) && TLI.isZExtFree(HalfVT, VT)) { |
| APInt UpperBits = APInt::getHighBitsSet(NumBits, NumBits / 2); |
| if (DAG.MaskedValueIsZero(N0, UpperBits)) { |
| SDValue PopCnt = DAG.getNode(ISD::CTPOP, DL, HalfVT, |
| DAG.getZExtOrTrunc(N0, DL, HalfVT)); |
| return DAG.getZExtOrTrunc(PopCnt, DL, VT); |
| } |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| static bool isLegalToCombineMinNumMaxNum(SelectionDAG &DAG, SDValue LHS, |
| SDValue RHS, const SDNodeFlags Flags, |
| const TargetLowering &TLI) { |
| EVT VT = LHS.getValueType(); |
| if (!VT.isFloatingPoint()) |
| return false; |
| |
| const TargetOptions &Options = DAG.getTarget().Options; |
| |
| return (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) && |
| TLI.isProfitableToCombineMinNumMaxNum(VT) && |
| (Flags.hasNoNaNs() || |
| (DAG.isKnownNeverNaN(RHS) && DAG.isKnownNeverNaN(LHS))); |
| } |
| |
| static SDValue combineMinNumMaxNumImpl(const SDLoc &DL, EVT VT, SDValue LHS, |
| SDValue RHS, SDValue True, SDValue False, |
| ISD::CondCode CC, |
| const TargetLowering &TLI, |
| SelectionDAG &DAG) { |
| EVT TransformVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); |
| switch (CC) { |
| case ISD::SETOLT: |
| case ISD::SETOLE: |
| case ISD::SETLT: |
| case ISD::SETLE: |
| case ISD::SETULT: |
| case ISD::SETULE: { |
| // Since the operands are already known never-NaN here, either fminnum or |
| // fminnum_ieee is OK. Try the IEEE version first, since fminnum is |
| // expanded in terms of it. |
| unsigned IEEEOpcode = (LHS == True) ? ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; |
| if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT)) |
| return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS); |
| |
| unsigned Opcode = (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM; |
| if (TLI.isOperationLegalOrCustom(Opcode, TransformVT)) |
| return DAG.getNode(Opcode, DL, VT, LHS, RHS); |
| return SDValue(); |
| } |
| case ISD::SETOGT: |
| case ISD::SETOGE: |
| case ISD::SETGT: |
| case ISD::SETGE: |
| case ISD::SETUGT: |
| case ISD::SETUGE: { |
| unsigned IEEEOpcode = (LHS == True) ? ISD::FMAXNUM_IEEE : ISD::FMINNUM_IEEE; |
| if (TLI.isOperationLegalOrCustom(IEEEOpcode, VT)) |
| return DAG.getNode(IEEEOpcode, DL, VT, LHS, RHS); |
| |
| unsigned Opcode = (LHS == True) ? ISD::FMAXNUM : ISD::FMINNUM; |
| if (TLI.isOperationLegalOrCustom(Opcode, TransformVT)) |
| return DAG.getNode(Opcode, DL, VT, LHS, RHS); |
| return SDValue(); |
| } |
| default: |
| return SDValue(); |
| } |
| } |
| |
| SDValue DAGCombiner::foldShiftToAvg(SDNode *N) { |
| const unsigned Opcode = N->getOpcode(); |
| |
| // Convert (sr[al] (add n[su]w x, y), 1) -> (avgfloor[su] x, y) |
| if (Opcode != ISD::SRA && Opcode != ISD::SRL) |
| return SDValue(); |
| |
| unsigned FloorISD = 0; |
| auto VT = N->getValueType(0); |
| bool IsUnsigned = false; |
| |
| // Decide whether this is signed or unsigned. |
| switch (Opcode) { |
| case ISD::SRA: |
| if (!hasOperation(ISD::AVGFLOORS, VT)) |
| return SDValue(); |
| FloorISD = ISD::AVGFLOORS; |
| break; |
| case ISD::SRL: |
| IsUnsigned = true; |
| if (!hasOperation(ISD::AVGFLOORU, VT)) |
| return SDValue(); |
| FloorISD = ISD::AVGFLOORU; |
| break; |
| default: |
| return SDValue(); |
| } |
| |
| // Captured values. |
| SDValue A, B, Add; |
| |
| // Match floor average as it is common to both floor/ceil avgs. |
| if (!sd_match(N, m_BinOp(Opcode, |
| m_AllOf(m_Value(Add), m_Add(m_Value(A), m_Value(B))), |
| m_One()))) |
| return SDValue(); |
| |
| // Can't optimize adds that may wrap. |
| if (IsUnsigned && !Add->getFlags().hasNoUnsignedWrap()) |
| return SDValue(); |
| |
| if (!IsUnsigned && !Add->getFlags().hasNoSignedWrap()) |
| return SDValue(); |
| |
| return DAG.getNode(FloorISD, SDLoc(N), N->getValueType(0), {A, B}); |
| } |
| |
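| // Fold a bitwise op whose second operand hides a NOT inside an add/sub so |
| // that a single NOT is applied to the combined arithmetic instead, using |
| // the identities (not y) + z == not (y - z) and (not y) - z == not (y + z): |
| // (logic_op x, (add (not y), z)) --> (logic_op x, (not (sub y, z))) |
| // (logic_op x, (sub (not y), z)) --> (logic_op x, (not (add y, z))) |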
| SDValue DAGCombiner::foldBitwiseOpWithNeg(SDNode *N, const SDLoc &DL, EVT VT) { |
| unsigned Opc = N->getOpcode(); |
| SDValue X, Y, Z; |
| if (sd_match( |
| N, m_BitwiseLogic(m_Value(X), m_Add(m_Not(m_Value(Y)), m_Value(Z))))) |
| return DAG.getNode(Opc, DL, VT, X, |
| DAG.getNOT(DL, DAG.getNode(ISD::SUB, DL, VT, Y, Z), VT)); |
| |
| if (sd_match(N, m_BitwiseLogic(m_Value(X), m_Sub(m_OneUse(m_Not(m_Value(Y))), |
| m_Value(Z))))) |
| return DAG.getNode(Opc, DL, VT, X, |
| DAG.getNOT(DL, DAG.getNode(ISD::ADD, DL, VT, Y, Z), VT)); |
| |
| return SDValue(); |
| } |
| |
| /// Generate Min/Max node |
| SDValue DAGCombiner::combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, |
| SDValue RHS, SDValue True, |
| SDValue False, ISD::CondCode CC) { |
| if ((LHS == True && RHS == False) || (LHS == False && RHS == True)) |
| return combineMinNumMaxNumImpl(DL, VT, LHS, RHS, True, False, CC, TLI, DAG); |
| |
| // If we can't directly match this, try to see if we can pull an fneg out of |
| // the select. |
| SDValue NegTrue = TLI.getCheaperOrNeutralNegatedExpression( |
| True, DAG, LegalOperations, ForCodeSize); |
| if (!NegTrue) |
| return SDValue(); |
| |
| HandleSDNode NegTrueHandle(NegTrue); |
| |
| // Try to unfold an fneg from the select if we are comparing the negated |
| // constant. |
| // |
| // select (setcc x, K) (fneg x), -K -> fneg(minnum(x, K)) |
| // |
| // TODO: Handle fabs |
| if (LHS == NegTrue) { |
| // If we can't directly match this, try to see if we can pull an fneg out of |
| // the select. |
| SDValue NegRHS = TLI.getCheaperOrNeutralNegatedExpression( |
| RHS, DAG, LegalOperations, ForCodeSize); |
| if (NegRHS) { |
| HandleSDNode NegRHSHandle(NegRHS); |
| if (NegRHS == False) { |
| SDValue Combined = combineMinNumMaxNumImpl(DL, VT, LHS, RHS, NegTrue, |
| False, CC, TLI, DAG); |
| if (Combined) |
| return DAG.getNode(ISD::FNEG, DL, VT, Combined); |
| } |
| } |
| } |
| |
| return SDValue(); |
| } |
| |
| /// If a (v)select has a condition value that is a sign-bit test, try to smear |
| /// the condition operand sign-bit across the value width and use it as a mask. |
| static SDValue foldSelectOfConstantsUsingSra(SDNode *N, const SDLoc &DL, |
| SelectionDAG &DAG) { |
| SDValue Cond = N->getOperand(0); |
| SDValue C1 = N->getOperand(1); |
| SDValue C2 = N->getOperand(2); |
| if (!isConstantOrConstantVector(C1) || !isConstantOrConstantVector(C2)) |
| return SDValue(); |
| |
| EVT VT = N->getValueType(0); |
| if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse() || |
| VT != Cond.getOperand(0).getValueType()) |
| return SDValue(); |
| |
| // The inverted-condition + commuted-select variants of these patterns are |
| // canonicalized to these forms in IR. |
| SDValue X = Cond.getOperand(0); |
| SDValue CondC = Cond.getOperand(1); |
| ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); |
| if (CC == ISD::SETGT && isAllOnesOrAllOnesSplat(CondC) && |
| isAllOnesOrAllOnesSplat(C2)) { |
| // i32 X > -1 ? C1 : -1 --> (X >>s 31) | C1 |
| SDValue ShAmtC = DAG.getConstant(X.getScalarValueSizeInBits() - 1, DL, VT); |
| SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShAmtC); |
| return DAG.getNode(ISD::OR, DL, VT, Sra, C1); |
| } |
| if (CC == ISD::SETLT && isNullOrNullSplat(CondC) && isNullOrNullSplat(C2)) { |
| // i8 X < 0 ? C1 : 0 --> (X >>s 7) & C1 |
| SDValue ShAmtC = DAG.getConstant(X.getScalarValueSizeInBits() - 1, DL, VT); |
| SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShAmtC); |
| return DAG.getNode(ISD::AND, DL, VT, Sra, C1); |
| } |
| return SDValue(); |
| } |
| |
| static bool shouldConvertSelectOfConstantsToMath(const SDValue &Cond, EVT VT, |
| const TargetLowering &TLI) { |
| if (!TLI.convertSelectOfConstantsToMath(VT)) |
| return false; |
| |
| if (Cond.getOpcode() != ISD::SETCC || !Cond->hasOneUse()) |
| return true; |
| if (!TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) |
| return true; |
| |
| ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); |
| if (CC == ISD::SETLT && isNullOrNullSplat(Cond.getOperand(1))) |
| return true; |
| if (CC == ISD::SETGT && isAllOnesOrAllOnesSplat(Cond.getOperand(1))) |
| return true; |
| |
| return false; |
| } |
| |
| SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) { |
| SDValue Cond = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue N2 = N->getOperand(2); |
| EVT VT = N->getValueType(0); |
| EVT CondVT = Cond.getValueType(); |
| SDLoc DL(N); |
| |
| if (!VT.isInteger()) |
| return SDValue(); |
| |
| auto *C1 = dyn_cast<ConstantSDNode>(N1); |
| auto *C2 = dyn_cast<ConstantSDNode>(N2); |
| if (!C1 || !C2) |
| return SDValue(); |
| |
| if (CondVT != MVT::i1 || LegalOperations) { |
| // fold (select Cond, 0, 1) -> (xor Cond, 1) |
| // We can't do this reliably if integer-based booleans have different |
| // contents from floating-point-based booleans. This is because we can't |
| // tell whether we have an integer-based or floating-point-based boolean |
| // unless we |
| // can find the SETCC that produced it and inspect its operands. This is |
| // fairly easy if C is the SETCC node, but it can potentially be |
| // undiscoverable (or not reasonably discoverable). For example, it could be |
| // in another basic block or it could require searching a complicated |
| // expression. |
| if (CondVT.isInteger() && |
| TLI.getBooleanContents(/*isVec*/false, /*isFloat*/true) == |
| TargetLowering::ZeroOrOneBooleanContent && |
| TLI.getBooleanContents(/*isVec*/false, /*isFloat*/false) == |
| TargetLowering::ZeroOrOneBooleanContent && |
| C1->isZero() && C2->isOne()) { |
| SDValue NotCond = |
| DAG.getNode(ISD::XOR, DL, CondVT, Cond, DAG.getConstant(1, DL, CondVT)); |
| if (VT.bitsEq(CondVT)) |
| return NotCond; |
| return DAG.getZExtOrTrunc(NotCond, DL, VT); |
| } |
| |
| return SDValue(); |
| } |
| |
| // Only do this before legalization to avoid conflicting with target-specific |
| // transforms in the other direction (create a select from a zext/sext). There |
| // is also a target-independent combine here in DAGCombiner in the other |
| // direction for (select Cond, -1, 0) when the condition is not i1. |
| assert(CondVT == MVT::i1 && !LegalOperations); |
| |
| // select Cond, 1, 0 --> zext (Cond) |
| if (C1->isOne() && C2->isZero()) |
| return DAG.getZExtOrTrunc(Cond, DL, VT); |
| |
| // select Cond, -1, 0 --> sext (Cond) |
| if (C1->isAllOnes() && C2->isZero()) |
| return DAG.getSExtOrTrunc(Cond, DL, VT); |
| |
| // select Cond, 0, 1 --> zext (!Cond) |
| if (C1->isZero() && C2->isOne()) { |
| SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1); |
| NotCond = DAG.getZExtOrTrunc(NotCond, DL, VT); |
| return NotCond; |
| } |
| |
| // select Cond, 0, -1 --> sext (!Cond) |
| if (C1->isZero() && C2->isAllOnes()) { |
| SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1); |
| NotCond = DAG.getSExtOrTrunc(NotCond, DL, VT); |
| return NotCond; |
| } |
| |
| // Use a target hook because some targets may prefer to transform in the |
| // other direction. |
| if (!shouldConvertSelectOfConstantsToMath(Cond, VT, TLI)) |
| return SDValue(); |
| |
| // For any constants that differ by 1, we can transform the select into |
| // an extend and add. |
| const APInt &C1Val = C1->getAPIntValue(); |
| const APInt &C2Val = C2->getAPIntValue(); |
| |
| // select Cond, C1, C1-1 --> add (zext Cond), C1-1 |
| if (C1Val - 1 == C2Val) { |
| Cond = DAG.getZExtOrTrunc(Cond, DL, VT); |
| return DAG.getNode(ISD::ADD, DL, VT, Cond, N2); |
| } |
| |
| // select Cond, C1, C1+1 --> add (sext Cond), C1+1 |
| if (C1Val + 1 == C2Val) { |
| Cond = DAG.getSExtOrTrunc(Cond, DL, VT); |
| return DAG.getNode(ISD::ADD, DL, VT, Cond, N2); |
| } |
| |
| // select Cond, Pow2, 0 --> (zext Cond) << log2(Pow2) |
| if (C1Val.isPowerOf2() && C2Val.isZero()) { |
| Cond = DAG.getZExtOrTrunc(Cond, DL, VT); |
| SDValue ShAmtC = |
| DAG.getShiftAmountConstant(C1Val.exactLogBase2(), VT, DL); |
| return DAG.getNode(ISD::SHL, DL, VT, Cond, ShAmtC); |
| } |
| |
| // select Cond, -1, C --> or (sext Cond), C |
| if (C1->isAllOnes()) { |
| Cond = DAG.getSExtOrTrunc(Cond, DL, VT); |
| return DAG.getNode(ISD::OR, DL, VT, Cond, N2); |
| } |
| |
| // select Cond, C, -1 --> or (sext (not Cond)), C |
| if (C2->isAllOnes()) { |
| SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1); |
| NotCond = DAG.getSExtOrTrunc(NotCond, DL, VT); |
| return DAG.getNode(ISD::OR, DL, VT, NotCond, N1); |
| } |
| |
| if (SDValue V = foldSelectOfConstantsUsingSra(N, DL, DAG)) |
| return V; |
| |
| return SDValue(); |
| } |
| |
| template <class MatchContextClass> |
| static SDValue foldBoolSelectToLogic(SDNode *N, const SDLoc &DL, |
| SelectionDAG &DAG) { |
| assert((N->getOpcode() == ISD::SELECT || N->getOpcode() == ISD::VSELECT || |
| N->getOpcode() == ISD::VP_SELECT) && |
| "Expected a (v)(vp.)select"); |
| SDValue Cond = N->getOperand(0); |
| SDValue T = N->getOperand(1), F = N->getOperand(2); |
| EVT VT = N->getValueType(0); |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| MatchContextClass matcher(DAG, TLI, N); |
| |
| if (VT != Cond.getValueType() || VT.getScalarSizeInBits() != 1) |
| return SDValue(); |
| |
| // select Cond, Cond, F --> or Cond, freeze(F) |
| // select Cond, 1, F --> or Cond, freeze(F) |
| if (Cond == T || isOneOrOneSplat(T, /* AllowUndefs */ true)) |
| return matcher.getNode(ISD::OR, DL, VT, Cond, DAG.getFreeze(F)); |
| |
| // select Cond, T, Cond --> and Cond, freeze(T) |
| // select Cond, T, 0 --> and Cond, freeze(T) |
| if (Cond == F || isNullOrNullSplat(F, /* AllowUndefs */ true)) |
| return matcher.getNode(ISD::AND, DL, VT, Cond, DAG.getFreeze(T)); |
| |
| // select Cond, T, 1 --> or (not Cond), freeze(T) |
| if (isOneOrOneSplat(F, /* AllowUndefs */ true)) { |
| SDValue NotCond = |
| matcher.getNode(ISD::XOR, DL, VT, Cond, DAG.getAllOnesConstant(DL, VT)); |
| return matcher.getNode(ISD::OR, DL, VT, NotCond, DAG.getFreeze(T)); |
| } |
| |
| // select Cond, 0, F --> and (not Cond), freeze(F) |
| if (isNullOrNullSplat(T, /* AllowUndefs */ true)) { |
| SDValue NotCond = |
| matcher.getNode(ISD::XOR, DL, VT, Cond, DAG.getAllOnesConstant(DL, VT)); |
| return matcher.getNode(ISD::AND, DL, VT, NotCond, DAG.getFreeze(F)); |
| } |
| |
| return SDValue(); |
| } |
| |
| static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue N2 = N->getOperand(2); |
| EVT VT = N->getValueType(0); |
| unsigned EltSizeInBits = VT.getScalarSizeInBits(); |
| |
| SDValue Cond0, Cond1; |
| ISD::CondCode CC; |
| if (!sd_match(N0, m_OneUse(m_SetCC(m_Value(Cond0), m_Value(Cond1), |
| m_CondCode(CC)))) || |
| VT != Cond0.getValueType()) |
| return SDValue(); |
| |
| // Match a signbit check of Cond0 as "Cond0 s<0". Swap select operands if the |
| // compare is inverted from that pattern ("Cond0 s> -1"). |
| if (CC == ISD::SETLT && isNullOrNullSplat(Cond1)) |
| ; // This is the pattern we are looking for. |
| else if (CC == ISD::SETGT && isAllOnesOrAllOnesSplat(Cond1)) |
| std::swap(N1, N2); |
| else |
| return SDValue(); |
| |
| // (Cond0 s< 0) ? N1 : 0 --> (Cond0 s>> BW-1) & freeze(N1) |
| if (isNullOrNullSplat(N2)) { |
| SDLoc DL(N); |
| SDValue ShiftAmt = DAG.getShiftAmountConstant(EltSizeInBits - 1, VT, DL); |
| SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Cond0, ShiftAmt); |
| return DAG.getNode(ISD::AND, DL, VT, Sra, DAG.getFreeze(N1)); |
| } |
| |
| // (Cond0 s< 0) ? -1 : N2 --> (Cond0 s>> BW-1) | freeze(N2) |
| if (isAllOnesOrAllOnesSplat(N1)) { |
| SDLoc DL(N); |
| SDValue ShiftAmt = DAG.getShiftAmountConstant(EltSizeInBits - 1, VT, DL); |
| SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Cond0, ShiftAmt); |
| return DAG.getNode(ISD::OR, DL, VT, Sra, DAG.getFreeze(N2)); |
| } |
| |
| // If we have to invert the sign bit mask, only do that transform if the |
| // target has a bitwise 'and not' instruction (the invert is free). |
| // (Cond0 s< 0) ? 0 : N2 --> ~(Cond0 s>> BW-1) & freeze(N2) |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| if (isNullOrNullSplat(N1) && TLI.hasAndNot(N1)) { |
| SDLoc DL(N); |
| SDValue ShiftAmt = DAG.getShiftAmountConstant(EltSizeInBits - 1, VT, DL); |
| SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, Cond0, ShiftAmt); |
| SDValue Not = DAG.getNOT(DL, Sra, VT); |
| return DAG.getNode(ISD::AND, DL, VT, Not, DAG.getFreeze(N2)); |
| } |
| |
| // TODO: There's another pattern in this family, but it may require |
| // implementing hasOrNot() to check for profitability: |
| // (Cond0 s> -1) ? -1 : N2 --> ~(Cond0 s>> BW-1) | freeze(N2) |
| |
| return SDValue(); |
| } |
| |
| // Match SELECTs with absolute difference patterns. |
| // (select (setcc a, b, set?gt), (sub a, b), (sub b, a)) --> (abd? a, b) |
| // (select (setcc a, b, set?ge), (sub a, b), (sub b, a)) --> (abd? a, b) |
| // (select (setcc a, b, set?lt), (sub b, a), (sub a, b)) --> (abd? a, b) |
| // (select (setcc a, b, set?le), (sub b, a), (sub a, b)) --> (abd? a, b) |
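// For instance (an illustrative check of the unsigned case): with a = 3 and
// b = 7, (setcc a, b, setugt) is false, the select yields (sub b, a) = 4,
// and (abdu a, b) = |3 - 7| = 4, so the fold preserves the result.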
| SDValue DAGCombiner::foldSelectToABD(SDValue LHS, SDValue RHS, SDValue True, |
| SDValue False, ISD::CondCode CC, |
| const SDLoc &DL) { |
| bool IsSigned = isSignedIntSetCC(CC); |
| unsigned ABDOpc = IsSigned ? ISD::ABDS : ISD::ABDU; |
| EVT VT = LHS.getValueType(); |
| |
| if (LegalOperations && !hasOperation(ABDOpc, VT)) |
| return SDValue(); |
| |
| switch (CC) { |
| case ISD::SETGT: |
| case ISD::SETGE: |
| case ISD::SETUGT: |
| case ISD::SETUGE: |
| if (sd_match(True, m_Sub(m_Specific(LHS), m_Specific(RHS))) && |
| sd_match(False, m_Sub(m_Specific(RHS), m_Specific(LHS)))) |
| return DAG.getNode(ABDOpc, DL, VT, LHS, RHS); |
| if (sd_match(True, m_Sub(m_Specific(RHS), m_Specific(LHS))) && |
| sd_match(False, m_Sub(m_Specific(LHS), m_Specific(RHS))) && |
| hasOperation(ABDOpc, VT)) |
| return DAG.getNegative(DAG.getNode(ABDOpc, DL, VT, LHS, RHS), DL, VT); |
| break; |
| case ISD::SETLT: |
| case ISD::SETLE: |
| case ISD::SETULT: |
| case ISD::SETULE: |
| if (sd_match(True, m_Sub(m_Specific(RHS), m_Specific(LHS))) && |
| sd_match(False, m_Sub(m_Specific(LHS), m_Specific(RHS)))) |
| return DAG.getNode(ABDOpc, DL, VT, LHS, RHS); |
| if (sd_match(True, m_Sub(m_Specific(LHS), m_Specific(RHS))) && |
| sd_match(False, m_Sub(m_Specific(RHS), m_Specific(LHS))) && |
| hasOperation(ABDOpc, VT)) |
| return DAG.getNegative(DAG.getNode(ABDOpc, DL, VT, LHS, RHS), DL, VT); |
| break; |
| default: |
| break; |
| } |
| |
| return SDValue(); |
| } |
| |
| SDValue DAGCombiner::visitSELECT(SDNode *N) { |
| SDValue N0 = N->getOperand(0); |
| SDValue N1 = N->getOperand(1); |
| SDValue N2 = N->getOperand(2); |
| EVT VT = N->getValueType(0); |
| EVT VT0 = N0.getValueType(); |
| SDLoc DL(N); |
| SDNodeFlags Flags = N->getFlags(); |
| |
| if (SDValue V = DAG.simplifySelect(N0, N1, N2)) |
| return V; |
| |
| if (SDValue V = foldBoolSelectToLogic<EmptyMatchContext>(N, DL, DAG)) |
| return V; |
| |
| // select (not Cond), N1, N2 -> select Cond, N2, N1 |
| if (SDValue F = extractBooleanFlip(N0, DAG, TLI, false)) { |
| SDValue SelectOp = DAG.getSelect(DL, VT, F, N2, N1); |
| SelectOp->setFlags(Flags); |
| return SelectOp; |
| } |
| |
| if (SDValue V = foldSelectOfConstants(N)) |
| return V; |
| |
| // If we can fold this based on the true/false value, do so. |
| if (SimplifySelectOps(N, N1, N2)) |
| return SDValue(N, 0); // Don't revisit N. |
| |
| if (VT0 == MVT::i1) { |
| // The code in this block deals with the following 2 equivalences: |
| // select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y)) |
| // select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y) |
| // The target can specify its preferred form with the |
    // shouldNormalizeToSelectSequence() callback. However, we always transform
    // to the right-hand form when the inner select already exists in the DAG
    // (reusing it is free), and we always transform to the left-hand form when
    // we can further optimize the combined condition.
| bool normalizeToSequence = |
| TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT); |
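    // E.g. (an illustrative sketch): given select (and C0, C1), X, Y and an
    // already-existing select C1, X, Y node elsewhere in the DAG, the fold
    // below rewrites to select C0, (select C1, X, Y), Y and reuses that node.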
| // select (and Cond0, Cond1), X, Y |
| // -> select Cond0, (select Cond1, X, Y), Y |
| if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) { |
| SDValue Cond0 = N0->getOperand(0); |
| SDValue Cond1 = N0->getOperand(1); |
| SDValue InnerSelect = |
| DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond1, N1, N2, Flags); |
| if (normalizeToSequence || !InnerSelect.use_empty()) |
| return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, |
| InnerSelect, N2, Flags); |
| // Cleanup on failure. |
| if (InnerSelect.use_empty()) |
| recursivelyDeleteUnusedNodes(InnerSelect.getNode()); |
| } |
| // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y) |
| if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) { |
| SDValue Cond0 = N0->getOperand(0); |
| SDValue Cond1 = N0->getOperand(1); |
| SDValue InnerSelect = DAG.getNode(ISD::SELECT, DL, N1.getValueType(), |
| Cond1, N1, N2, Flags); |
| if (normalizeToSequence || !InnerSelect.use_empty()) |
| return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Cond0, N1, |
| InnerSelect, Flags); |
| // Cleanup on failure. |
| if (InnerSelect.use_empty()) |
| recursivelyDeleteUnusedNodes(InnerSelect.getNode()); |
| } |
| |
| // select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y |
| if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) { |
| SDValue N1_0 = N1->getOperand(0); |
| SDValue N1_1 = N1->getOperand(1); |
| SDValue N1_2 = N1->getOperand(2); |
| if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) { |
| // Create the actual and node if we can generate good code for it. |
| if (!normalizeToSequence) { |
| SDValue And = DAG.getNode(ISD::AND, DL, N0.getValueType(), N0, N1_0); |
| return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), And, N1_1, |
| N2, Flags); |
| } |
| // Otherwise see if we can optimize the "and" to a better pattern. |
| if (SDValue Combined = visitANDLike(N0, N1_0, N)) { |
| return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1_1, |
| N2, Flags); |
| } |
| } |
| } |
| // select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y |
| if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) { |
| SDValue N2_0 = N2->getOperand(0); |
| SDValue N2_1 = N2->getOperand(1); |
| SDValue N2_2 = N2->getOperand(2); |
| if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) { |
| // Create the actual or node if we can generate good code for it. |
| if (!normalizeToSequence) { |
| SDValue Or = DAG.getNode(ISD::OR, DL, N0.getValueType(), N0, N2_0); |
| return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Or, N1, |
| N2_2, Flags); |
| } |
| // Otherwise see if we can optimize to a better pattern. |
| if (SDValue Combined = visitORLike(N0, N2_0, DL)) |
| return DAG.getNode(ISD::SELECT, DL, N1.getValueType(), Combined, N1, |
| N2_2, Flags); |
| } |
| } |
| |
| // select usubo(x, y).overflow, (sub y, x), (usubo x, y) -> abdu(x, y) |
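    // usubo(x, y).overflow is set exactly when y u> x, so the true arm
    // (sub y, x) and the false arm (the wrapped usubo difference x - y) both
    // equal the unsigned absolute difference |x - y|.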
| if (N0.getOpcode() == ISD::USUBO && N0.getResNo() == 1 && |
| N2.getNode() == N0.getNode() && N2.getResNo() == 0 && |
| N1.getOpcode() == ISD::SUB && N2.getOperand(0) == N1.getOperand(1) && |
| N2.getOperand(1) == N1.getOperand(0) && |
| (!LegalOperations || TLI.isOperationLegal(ISD::ABDU, VT))) |
| return DAG.getNode(ISD::ABDU, DL, VT, N0.getOperand(0), N0.getOperand(1)); |
| |
| // select usubo(x, y).overflow, (usubo x, y), (sub y, x) -> neg (abdu x, y) |
| if (N0.getOpcode() == ISD::USUBO && N0.getResNo() == 1 && |
| N1.getNode() == N0.getNode() && N1.getResNo() == 0 && |
| N2.getOpcode() == ISD::SUB && N2.getOperand(0) == N1.getOperand(1) && |
| N2.getOperand(1) == N1.getOperand(0) && |
| (!LegalOperations || TLI.isOperationLegal(ISD::ABDU, VT))) |
| return DAG.getNegative( |
| DAG.getNode(ISD::ABDU, DL, VT, N0.getOperand(0), N0.getOperand(1)), |
| DL, VT); |
| } |
| |
| // Fold selects based on a setcc into other things, such as min/max/abs. |
| if (N0.getOpcode() == ISD::SETCC) { |
| SDValue Cond0 = N0.getOperand(0), Cond1 = N0.getOperand(1); |
| ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); |
| |
| // select (fcmp lt x, y), x, y -> fminnum x, y |
| // select (fcmp gt x, y), x, y -> fmaxnum x, y |
| // |
| // This is OK if we don't care what happens if either operand is a NaN. |
| if (N0.hasOneUse() && isLegalToCombineMinNumMaxNum(DAG, N1, N2, Flags, TLI)) |
| if (SDValue FMinMax = |
| combineMinNumMaxNum(DL, VT, Cond0, Cond1, N1, N2, CC)) |
| return FMinMax; |
| |
| // Use 'unsigned add with overflow' to optimize an unsigned saturating add. |
| // This is conservatively limited to pre-legal-operations to give targets |
| // a chance to reverse the transform if they want to do that. Also, it is |
| // unlikely that the pattern would be formed late, so it's probably not |
| // worth going through the other checks. |
| if (!LegalOperations && TLI.isOperationLegalOrCustom(ISD::UADDO, VT) && |
| CC == ISD::SETUGT && N0.hasOneUse() && isAllOnesConstant(N1) && |
| N2.getOpcode() == ISD::ADD && Cond0 == N2.getOperand(0)) { |
| auto *C = dyn_cast<ConstantSDNode>(N2.getOperand(1)); |
| auto *NotC = dyn_cast<ConstantSDNode>(Cond1); |
| if (C && NotC && C->getAPIntValue() == ~NotC->getAPIntValue()) { |
| // select (setcc Cond0, ~C, ugt), -1, (add Cond0, C) --> |
| // uaddo Cond0, C; select uaddo.1, -1, uaddo.0 |
| // |
| // The IR equivalent of this transform would have this form: |
| // %a = add %x, C |
| // %c = icmp ugt %x, ~C |
| // %r = select %c, -1, %a |
| // => |
| // %u = call {iN,i1} llvm.uadd.with.overflow(%x, C) |
| // %u0 = extractvalue %u, 0 |
| // %u1 = extractvalue %u, 1 |
| // %r = select %u1, -1, %u0 |
| SDVTList VTs = DAG.getVTList(VT, VT0); |
| SDValue UAO = DAG.getNode(ISD::UADDO, DL, VTs, Cond0, N2.getOperand(1)); |
| return DAG.getSelect(DL, VT, UAO.getValue(1), N1, UAO.getValue(0)); |
| } |
| } |
| |
| if (TLI.isOperationLegal(ISD::SELECT_CC, VT) || |
| (!LegalOperations && |
| TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))) { |
      // Any flags available in a select/setcc fold will be on the setcc, as
      // they migrated there from the original fcmp.
| Flags = N0->getFlags(); |
| SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, VT, Cond0, Cond1, N1, |
| N2, N0.getOperand(2)); |
| SelectNode->setFlags(Flags); |
| return SelectNode; |
| } |
| |
| if (SDValue ABD = foldSelectToABD(Cond0, Cond1, N1, N2, CC, DL)) |
| return ABD; |
| |
| if (SDValue NewSel = SimplifySelect(DL, N0, N1, N2)) |
| return NewSel; |
| |
| // (select (ugt x, C), (add x, ~C), x) -> (umin (add x, ~C), x) |
| // (select (ult x, C), x, (add x, -C)) -> (umin x, (add x, -C)) |
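    // A concrete check of the first form (8-bit, C = 200, ~C = 55): for
    // x = 210 u> C the add wraps to 210 + 55 - 256 = 9 and umin(9, 210)
    // selects it; for x = 100 u<= C there is no wrap, 155 u>= 100, and umin
    // selects x. Both arms therefore match the original select.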
| APInt C; |
| if (sd_match(Cond1, m_ConstInt(C)) && hasUMin(VT)) { |
| if (CC == ISD::SETUGT && Cond0 == N2 && |
| sd_match(N1, m_Add(m_Specific(N2), m_SpecificInt(~C)))) { |
        // The resulting code relies on an unsigned wrap in the ADD, so
        // recreate the ADD to drop any nuw/nsw flags it may carry.
| SDValue AddC = DAG.getConstant(~C, DL, VT); |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N2, AddC); |
| return DAG.getNode(ISD::UMIN, DL, VT, Add, N2); |
| } |
| if (CC == ISD::SETULT && Cond0 == N1 && |
| sd_match(N2, m_Add(m_Specific(N1), m_SpecificInt(-C)))) { |
| // Ditto. |
| SDValue AddC = DAG.getConstant(-C, DL, VT); |
| SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, AddC); |
| return DAG.getNode(ISD::UMIN, DL, VT, N1, Add); |
| } |
| } |
| } |
| |
| if (!VT.isVector()) |
| if (SDValue BinOp = foldSelectOfBinops(N)) |
| return BinOp; |
| |
| if (SDValue R = combineSelectAsExtAnd(N0, N1, N2, DL, DAG)) |
| return R; |
| |
| return SDValue(); |
| } |
| |
// This function assumes both of the vselect's value operands are
// CONCAT_VECTORS nodes and that the condition is a BUILD_VECTOR of
// ConstantSDNodes (or undefs).
| static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) { |
| SDLoc DL(N); |
| SDValue Cond = N->getOperand(0); |
| SDValue LHS = N->getOperand(1); |
| SDValue RHS = N->getOperand(2); |
| EVT VT = N->getValueType(0); |
| int NumElems = VT.getVectorNumElements(); |
| assert(LHS.getOpcode() == ISD::CONCAT_VECTORS && |
| RHS.getOpcode() == ISD::CONCAT_VECTORS && |
| Cond.getOpcode() == ISD::BUILD_VECTOR); |
| |
  // CONCAT_VECTORS can take an arbitrary number of operands. We only care
  // about binary (two-operand) ones here.
| if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2) |
| return SDValue(); |
| |
| // We're sure we have an even number of elements due to the |
| // concat_vectors we have as arguments to vselect. |
  // Skip BV elements until we find one that's not an UNDEF. After we find a
  // non-UNDEF element, keep looping until we get to half the length of the BV
  // and check that all the non-undef elements are the same node.
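  // For example (an illustrative sketch): with NumElems = 8 and
  // Cond = <1, 1, undef, 1, 0, 0, undef, 0>, the bottom half is uniformly 1
  // and the top half uniformly 0, so the vselect becomes
  // concat_vectors(LHS.op0, RHS.op1).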
| ConstantSDNode *BottomHalf = nullptr; |
| for (int i = 0; i < NumElems / 2; ++i) { |
| if (Cond->getOperand(i)->isUndef()) |
| continue; |
| |
| if (BottomHalf == nullptr) |
| BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i)); |
| else if (Cond->getOperand(i).getNode() != BottomHalf) |
| return SDValue(); |
| } |
| |
| // Do the same for the second half of the BuildVector |
| ConstantSDNode *TopHalf = nullptr; |
| for (int i = NumElems / 2; i < NumElems; ++i) { |
| if (Cond->getOperand(i)->isUndef()) |
| continue; |
| |
| if (TopHalf == nullptr) |
| TopHalf = cast<ConstantSDNode>(Cond.getOperand(i)); |
| else if (Cond->getOperand(i).getNode() != TopHalf) |
| return SDValue(); |
| } |
| |
| assert(TopHalf && BottomHalf && |
| "One half of the selector was all UNDEFs and the other was all the " |
| "same value. This should have been addressed before this function."); |
| return DAG.getNode( |
| ISD::CONCAT_VECTORS, DL, VT, |
| BottomHalf->isZero() ? RHS->getOperand(0) : LHS->getOperand(0), |
| TopHalf->isZero() ? RHS->getOperand(1) : LHS->getOperand(1)); |
| } |
| |
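// Try to fold a uniform (splat) component of a gather/scatter index into the
// scalar base pointer. A hedged example: with BasePtr %base and
// Index = (add (splat %ofs), %step), this rewrites to BasePtr = %base + %ofs
// and Index = %step, leaving a simpler per-lane index.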
| bool refineUniformBase(SDValue &BasePtr, SDValue &Index, bool IndexIsScaled, |
| SelectionDAG &DAG, const SDLoc &DL) { |
| |
| // Only perform the transformation when existing operands can be reused. |
| if (IndexIsScaled) |
| return false; |
| |
| if (!isNullConstant(BasePtr) && !Index.hasOneUse()) |
| return false; |
| |
| EVT VT = BasePtr.getValueType(); |
| |
| if (SDValue SplatVal = DAG.getSplatValue(Index); |
| SplatVal && !isNullConstant(SplatVal) && |
| SplatVal.getValueType() == VT) { |
| BasePtr = DAG.getNode(ISD::ADD, DL, VT, BasePtr, SplatVal); |
| Index = DAG.getSplat(Index.getValueType(), DL, DAG.getConstant(0, DL, VT)); |
| return true; |
| } |
| |
| if (Index.getOpcode() != ISD::ADD) |
| return false; |
| |
| if (SDValue SplatVal = DAG.getSplatValue(Index.getOperand(0)); |
| SplatVal && SplatVal.getValueType() == VT) { |
| BasePtr = DAG.getNode(ISD::ADD, DL, VT, BasePtr, SplatVal); |
| Index = Index.getOperand(1); |
| return true; |
| } |
| if (SDValue SplatVal = DAG.getSplatValue(Index.getOperand(1)); |
| SplatVal && SplatVal.getValueType() == VT) { |
| BasePtr = DAG.getNode(ISD::ADD, DL, VT, BasePtr, SplatVal); |
| Index = Index.getOperand(0); |
| return true; |
| } |
| return false; |
| } |
| |
| // Fold sext/zext of index into index type. |
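// E.g. (a sketch) a gather whose Index is (zext %idx) can drop the extend and
// switch IndexType to the unsigned variant when the target reports that the
// narrower index is acceptable; a sign extend may only be stripped while the
// index type is still treated as signed.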
| bool refineIndexType(SDValue &Index, ISD::MemIndexType &IndexType, EVT DataVT, |
| SelectionDAG &DAG) { |
| const TargetLowering &TLI = DAG.getTargetLoweringInfo(); |
| |
| // It's always safe to look through zero extends. |
| if (Index.getOpcode() == ISD::ZERO_EXTEND) { |
| if (TLI.shouldRemoveExtendFromGSIndex(Index, DataVT)) { |
| IndexType = ISD::UNSIGNED_SCALED; |
| Index = Index.getOperand(0); |
| return true; |
| } |
| if (ISD::isIndexTypeSigned(IndexType)) { |
| IndexType = ISD::UNSIGNED_SCALED; |
| return true; |
| } |
| } |
| |
| // It's only safe to look through sign extends when Index is signed. |
| if (Index.getOpcode() == ISD::SIGN_EXTEND && |
| ISD::isIndexTypeSigned(IndexType) && |
| TLI.shouldRemoveExtendFromGSIndex(Index, DataVT)) { |
| Index = Index.getOperand(0); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| SDValue DAGCombiner::visitVPSCATTER(SDNode *N) { |
  VPScatterSDNode *MSC = cast<VPScatterSDNode>(N);
|