//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs: // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::asin:
  case Intrinsic::acos:
  case Intrinsic::atan:
  case Intrinsic::atan2:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::sincos:
  case Intrinsic::sincospi:
  case Intrinsic::tan:
  case Intrinsic::sinh:
  case Intrinsic::cosh:
  case Intrinsic::tanh:
  case Intrinsic::exp:
  case Intrinsic::exp10:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::minimumnum:
  case Intrinsic::maximumnum:
  case Intrinsic::modf:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return true;
  default:
    return false;
  }
}
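
// Illustrative example (hand-written, not part of this file): a trivially
// vectorizable intrinsic maps element-wise from its scalar form to a vector
// form, e.g. in LLVM IR the scalar call
//   %r = call float @llvm.sqrt.f32(float %x)
// vectorizes for VF=4 to
//   %r = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %v)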

bool llvm::isTriviallyScalarizable(Intrinsic::ID ID,
                                   const TargetTransformInfo *TTI) {
  if (isTriviallyVectorizable(ID))
    return true;

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicTriviallyScalarizable(ID);

  // TODO: Move frexp to isTriviallyVectorizable.
  // https://github.com/llvm/llvm-project/issues/112408
  switch (ID) {
  case Intrinsic::frexp:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return true;
  }
  return false;
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx,
                                              const TargetTransformInfo *TTI) {

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);

  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::vp_abs:
  case Intrinsic::ctlz:
  case Intrinsic::vp_ctlz:
  case Intrinsic::cttz:
  case Intrinsic::vp_cttz:
  case Intrinsic::is_fpclass:
  case Intrinsic::vp_is_fpclass:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  case Intrinsic::experimental_vp_splice:
    return ScalarOpdIdx == 2 || ScalarOpdIdx == 4 || ScalarOpdIdx == 5;
  default:
    return false;
  }
}
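
// Illustrative example (hand-written): for powi the exponent stays scalar in
// the vector form, which is why operand index 1 reports true above:
//   %r = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %v, i32 %n)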

bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
    Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI) {
  assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);

  if (VPCastIntrinsic::isVPCast(ID))
    return OpdIdx == -1 || OpdIdx == 0;

  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::vp_lrint:
  case Intrinsic::vp_llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return OpdIdx == -1 || OpdIdx == 0;
  case Intrinsic::modf:
  case Intrinsic::sincos:
  case Intrinsic::sincospi:
  case Intrinsic::is_fpclass:
  case Intrinsic::vp_is_fpclass:
    return OpdIdx == 0;
  case Intrinsic::powi:
    return OpdIdx == -1 || OpdIdx == 1;
  default:
    return OpdIdx == -1;
  }
}

bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField(
    Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI) {

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);

  switch (ID) {
  case Intrinsic::frexp:
    return RetIdx == 0 || RetIdx == 1;
  default:
    return RetIdx == 0;
  }
}

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For a fixed-length vector, return poison for an out-of-range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return PoisonValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vectors.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return PoisonValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}
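
// Illustrative example (hand-written): given IR such as
//   %v1 = insertelement <4 x i32> %v0, i32 %s, i32 1
// findScalarElement(%v1, 1) returns %s directly, so a later extractelement
// of element 1 can be replaced by the already-available scalar.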

int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}
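
// Worked examples (hand-computed from the loop above):
//   getSplatIndex(<3, -1, 3, 3>) == 3   (undef elements are ignored)
//   getSplatIndex(<0, 1, 0, 0>)  == -1  (two distinct indices, not a splat)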

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!all_equal(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                                  const APInt &DemandedElts, APInt &DemandedLHS,
                                  APInt &DemandedRHS, bool AllowUndefElts) {
  DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);

  // Early out if we don't demand any elements.
  if (DemandedElts.isZero())
    return true;

  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }

  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    int M = Mask[I];
    assert((-1 <= M) && (M < (SrcWidth * 2)) &&
           "Invalid shuffle mask constant");

    if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
      continue;

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M < 0)
      return false;

    if (M < SrcWidth)
      DemandedLHS.setBit(M);
    else
      DemandedRHS.setBit(M - SrcWidth);
  }

  return true;
}
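
// Worked example (hand-computed): with SrcWidth = 4, Mask = <0, 5, 1, 6> and
// all four output elements demanded, the loop above sets DemandedLHS bits
// {0, 1} (mask values 0 and 1) and DemandedRHS bits {1, 2} (mask values 5
// and 6 name elements 1 and 2 of the second source).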

bool llvm::isMaskedSlidePair(ArrayRef<int> Mask, int NumElts,
                             std::array<std::pair<int, int>, 2> &SrcInfo) {
  const int SignalValue = NumElts * 2;
  SrcInfo[0] = {-1, SignalValue};
  SrcInfo[1] = {-1, SignalValue};
  for (auto [i, M] : enumerate(Mask)) {
    if (M < 0)
      continue;
    int Src = M >= (int)NumElts;
    int Diff = (int)i - (M % NumElts);
    bool Match = false;
    for (int j = 0; j < 2; j++) {
      auto &[SrcE, DiffE] = SrcInfo[j];
      if (SrcE == -1) {
        assert(DiffE == SignalValue);
        SrcE = Src;
        DiffE = Diff;
      }
      if (SrcE == Src && DiffE == Diff) {
        Match = true;
        break;
      }
    }
    if (!Match)
      return false;
  }
  // Avoid all undef masks
  return SrcInfo[0].first != -1;
}
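
// Worked example (hand-computed): for NumElts = 4 and Mask = <1, 2, 3, 4>,
// elements 0-2 come from source 0 with offset -1 and element 3 comes from
// source 1 with offset 3, so SrcInfo becomes {{0, -1}, {1, 3}} and the
// function returns true (a classic two-source slide pair).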

void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}
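
// Worked example (hand-computed): narrowing by Scale = 2 turns each wide
// element index into two narrow ones, e.g. Mask = <1, -1> becomes
// ScaledMask = <2, 3, -1, -1>.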

bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal across
      // the entire slice.
      if (!all_equal(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the elements
  // of a mask with wider elements.
  return true;
}

bool llvm::widenShuffleMaskElts(ArrayRef<int> M,
                                SmallVectorImpl<int> &NewMask) {
  unsigned NumElts = M.size();
  if (NumElts % 2 != 0)
    return false;

  NewMask.clear();
  for (unsigned i = 0; i < NumElts; i += 2) {
    int M0 = M[i];
    int M1 = M[i + 1];

    // If both elements are undef, new mask is undef too.
    if (M0 == -1 && M1 == -1) {
      NewMask.push_back(-1);
      continue;
    }

    if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
      NewMask.push_back(M1 / 2);
      continue;
    }

    if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
      NewMask.push_back(M0 / 2);
      continue;
    }

    NewMask.clear();
    return false;
  }

  assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
  return true;
}
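
// Worked example (hand-computed): unlike the Scale-based overload above, this
// variant tolerates a one-sided undef within a pair, e.g.
// M = <-1, 1, 2, -1> widens to NewMask = <0, 1>.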

bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  unsigned NumSrcElts = Mask.size();
  assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (NumSrcElts == NumDstElts) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // Ensure we can find a whole scale factor.
  assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
         "Unexpected scaling factor");

  if (NumSrcElts > NumDstElts) {
    int Scale = NumSrcElts / NumDstElts;
    return widenShuffleMaskElts(Scale, Mask, ScaledMask);
  }

  int Scale = NumDstElts / NumSrcElts;
  narrowShuffleMaskElts(Scale, Mask, ScaledMask);
  return true;
}

void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &ScaledMask) {
  std::array<SmallVector<int, 16>, 2> TmpMasks;
  SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
  ArrayRef<int> InputMask = Mask;
  for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
    while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
      InputMask = *Output;
      std::swap(Output, Tmp);
    }
  }
  ScaledMask.assign(InputMask.begin(), InputMask.end());
}
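
// Worked example (hand-computed): repeatedly widening the identity mask
// <0, 1, 2, 3> gives <0, 1> and then <0>, which is what this function
// returns as the widest-element equivalent.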

void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned, bool)>
        ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform a better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  // permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(2 * NumOfSrcRegs, {});
    // Check which source registers the values in this dest register come
    // from.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= 2 * Sz || Mask[Idx] == PoisonMaskElem)
        continue;
      int MaskIdx = Mask[Idx] % Sz;
      int SrcRegIdx = MaskIdx / SzSrc + (Mask[Idx] >= Sz ? NumOfSrcRegs : 0);
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source register.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
      RegMasks[SrcRegIdx][K] = MaskIdx % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I : seq<unsigned>(NumOfUsedRegs)) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the single non-empty source mask.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have >2
      // input registers to shuffle, we merge the masks for 2 first registers
      // and generate a shuffle of 2 registers rather than the reordering of the
      // first register and then shuffle with the second register. Next,
      // generate the shuffles of the resulting register + the remaining
      // registers from the list.
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != PoisonMaskElem) {
            assert(FirstMask[Idx] == PoisonMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != PoisonMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      bool NewReg = true;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I : seq<unsigned>(2 * NumOfSrcRegs)) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx, NewReg);
          NewReg = false;
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx, NewReg);
          NewReg = false;
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}
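
// Worked example (hand-computed): for Mask = <0, 1, 2, 3> split over
// NumOfSrcRegs = NumOfDestRegs = NumOfUsedRegs = 2 (two elements per
// register), each dest register reads one whole source register, so the
// function invokes SingleInputAction(<0, 1>, /*SrcReg=*/0, /*DestReg=*/0)
// and SingleInputAction(<0, 1>, /*SrcReg=*/1, /*DestReg=*/1).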

void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
                                               const APInt &DemandedElts,
                                               APInt &DemandedLHS,
                                               APInt &DemandedRHS) {
  assert(VectorBitWidth >= 128 && "Vectors smaller than 128 bit not supported");
  int NumLanes = VectorBitWidth / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumEltsPerLane = NumElts / NumLanes;
  int HalfEltsPerLane = NumEltsPerLane / 2;

  DemandedLHS = APInt::getZero(NumElts);
  DemandedRHS = APInt::getZero(NumElts);

  // Map DemandedElts to the horizontal operands.
  for (int Idx = 0; Idx != NumElts; ++Idx) {
    if (!DemandedElts[Idx])
      continue;
    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
    int LocalIdx = Idx % NumEltsPerLane;
    if (LocalIdx < HalfEltsPerLane) {
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx);
    } else {
      LocalIdx -= HalfEltsPerLane;
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx);
    }
  }
}
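
// Worked example (hand-computed): for a 128-bit vector of 4 elements (one
// lane), demanding only result element 0 sets DemandedLHS bit 0, while
// demanding only result element 2 sets DemandedRHS bit 0, matching the
// operand layout of x86-style horizontal ops.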

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    // Don't modify the types of operands of a call, as doing that would cause a
    // signature mismatch.
    if (isa<CallBase>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (const auto &E : ECs) {
    if (!E->isLeader())
      continue;
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : ECs.members(*E))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
    // Round up to a power of 2
    MinBW = llvm::bit_ceil(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence class.
    bool Abort = false;
    for (Value *M : ECs.members(*E))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : ECs.members(*E)) {
      auto *MI = dyn_cast<Instruction>(M);
      if (!MI)
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = MI->getOperand(0)->getType();

      if (MinBW >= Ty->getScalarSizeInBits())
        continue;

      // If any of M's operands demand more bits than MinBW then M cannot be
      // performed safely in MinBW.
      auto *Call = dyn_cast<CallBase>(MI);
      auto Ops = Call ? Call->args() : MI->operands();
      if (any_of(Ops, [&DB, MinBW](Use &U) {
            auto *CI = dyn_cast<ConstantInt>(U);
            // For constant shift amounts, check if the shift would result in
            // poison.
            if (CI &&
                isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
                U.getOperandNo() == 1)
              return CI->uge(MinBW);
            uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
            return bit_ceil(BW) > MinBW;
          }))
        continue;

      MinBWs[MI] = MinBW;
    }
  }

  return MinBWs;
}
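
// Illustrative sketch (hand-written, not from this file): if a trunc-to-i8
// root only demands the low 8 bits of a chain such as
//   %a = add i32 %x, %y
//   %t = trunc i32 %a to i8
// then the connected values land in one equivalence class with MinBW = 8,
// and each shrinkable instruction appears in the returned map as
// {instruction, 8}.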

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (const auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// Add metadata from \p Inst to \p Metadata, if it can be preserved after
/// vectorization.
void llvm::getMetadataToPropagate(
    Instruction *Inst,
    SmallVectorImpl<std::pair<unsigned, MDNode *>> &Metadata) {
  Inst->getAllMetadataOtherThanDebugLoc(Metadata);
  static const unsigned SupportedIDs[] = {
      LLVMContext::MD_tbaa,         LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,      LLVMContext::MD_fpmath,
      LLVMContext::MD_nontemporal,  LLVMContext::MD_invariant_load,
      LLVMContext::MD_access_group, LLVMContext::MD_mmra};

  // Remove any unsupported metadata kinds from Metadata.
  for (unsigned Idx = 0; Idx != Metadata.size();) {
    if (is_contained(SupportedIDs, Metadata[Idx].first)) {
      ++Idx;
    } else {
      // Swap element to end and remove it.
      std::swap(Metadata[Idx], Metadata.back());
      Metadata.pop_back();
    }
  }
}

/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  SmallVector<std::pair<unsigned, MDNode *>> Metadata;
  getMetadataToPropagate(cast<Instruction>(VL[0]), Metadata);

  for (auto &[Kind, MD] : Metadata) {
    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);

      switch (Kind) {
      case LLVMContext::MD_mmra: {
        MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD);
        break;
      }
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}
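
// Worked example (hand-computed): for an interleave group with factor 2 where
// only member 0 is present, VF = 4 yields the i1 mask
// <1, 0, 1, 0, 1, 0, 1, 0>, disabling the lanes of the missing member.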

llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}
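
// Worked example (hand-computed):
//   createReplicatedMask(/*ReplicationFactor=*/3, /*VF=*/2)
//     == <0, 0, 0, 1, 1, 1>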

llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}
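
// Worked example (hand-computed):
//   createInterleaveMask(/*VF=*/4, /*NumVecs=*/2) == <0, 4, 1, 5, 2, 6, 3, 7>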

llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}
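
// Worked example (hand-computed):
//   createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4) == <0, 2, 4, 6>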

llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}
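
// Worked example (hand-computed):
//   createSequentialMask(/*Start=*/0, /*NumInts=*/4, /*NumUndefs=*/4)
//     == <0, 1, 2, 3, -1, -1, -1, -1>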

llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");

  // If the mask chooses an element from operand 1, reduce it to choose from the
  // corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}
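
// Worked example (hand-computed): with NumElts = 4,
//   createUnaryMask(<0, 5, 2, 7>, 4) == <0, 1, 2, 3>
// since mask values 5 and 7 select elements 1 and 3 of the second operand.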

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        return true;
  }
  return false;
}
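
// To illustrate the difference between the two predicates above (example
// masks, not exhaustive): for <i1 1, i1 0, i1 1>, maskIsAllOneOrUndef returns
// false because of the zero lane, while maskContainsAllOneOrUndef returns
// true because some lane is known one; for <i1 0, i1 0>, both return false.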

/// TODO: This is a lot like known bits, but for vectors. Is there something
/// we can unify this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}
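
// For example (illustrative): for the constant mask <i1 1, i1 0, i1 1>, only
// lanes 0 and 2 may be demanded, so the result is 0b101; for a non-constant
// mask, every lane is conservatively treated as demanded.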

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}
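
// For example, with the default MaxInterleaveGroupFactor of 8, strides such
// as -3 and 5 are considered strided, while 1 (a unit-stride access) and 16
// are not.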

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const DenseMap<Value *, const SCEV *> &Strides) {
  auto &DL = TheLoop->getHeader()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // Currently, codegen doesn't support cases where the type size doesn't
      // match the alloc size. Skip them for now.
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
        continue;

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride =
          getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                       /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}
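
// For example (an illustrative sketch): in a loop reading A[2 * i] with i32
// elements, the load's StrideDescriptor would record Stride = 2 and Size = 4,
// and the RPO walk above guarantees the AccessStrideInfo entries appear in
// program order.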

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence: a = A[i]; // (1)
//                               A[i] = b; // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence: A[i] = a;     // (1)
//                               A[i] = b;     // (2)
//                               A[i + 1] = c; // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const auto &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
  // Groups added to this set cannot have new members added.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  // 1. A and B have the same stride,
  // 2. A and B have the same memory object size, and
  // 3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *GroupB = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      GroupB = getInterleaveGroup(B);
      if (!GroupB) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
        if (B->mayWriteToMemory())
          StoreGroups.insert(GroupB);
        else
          LoadGroups.insert(GroupB);
      }
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
                                 StrideEntry *A) -> Instruction * {
        for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
          Instruction *MemberOfGroupB = Group->getMember(Index);
          if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
                                    A, &*AccessStrideInfo.find(MemberOfGroupB)))
            return MemberOfGroupB;
        }
        return nullptr;
      };

      auto GroupA = getInterleaveGroup(A);
      // If A is a load, dependences are tolerable; there's nothing to do here.
      // If both A and B belong to the same (store) group, they are
      // independent, even if dependences have not been recorded.
      // If both GroupA and GroupB are null, there's nothing to do here.
      if (A->mayWriteToMemory() && GroupA != GroupB) {
        Instruction *DependentInst = nullptr;
        // If GroupB is a load group, we have to compare AI against all
        // members of GroupB because if any load within GroupB has a dependence
        // on AI, we need to mark GroupB as complete and also release the
        // store GroupA (if A belongs to one). The former prevents incorrect
        // hoisting of load B above store A, while the latter prevents
        // incorrect sinking of store A below load B.
        if (GroupB && LoadGroups.contains(GroupB))
          DependentInst = DependentMember(GroupB, &*AI);
        else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
          DependentInst = B;

        if (DependentInst) {
          // A has a store dependence on B (or on some load within GroupB) and
          // is part of a store group. Release A's group to prevent illegal
          // sinking of A below B. A will then be free to form another group
          // with instructions that precede it.
          if (GroupA && StoreGroups.contains(GroupA)) {
            LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                                 "dependence between "
                              << *A << " and " << *DependentInst << '\n');
            StoreGroups.remove(GroupA);
            releaseGroup(GroupA);
          }
          // If B is a load and part of an interleave group, no earlier loads
          // can be added to B's interleave group, because this would mean the
          // DependentInst would move across store A. Mark the interleave group
          // as complete.
          if (GroupB && LoadGroups.contains(GroupB)) {
            LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
                              << " as complete.\n");
            CompletedLoadGroups.insert(GroupB);
          }
        }
      }
      if (CompletedLoadGroups.contains(GroupB)) {
        // Skip trying to add A to B; continue looking for other conflicting
        // A's in groups to be released.
        continue;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = GroupB;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          GroupB->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.
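
  // As a small worked example (illustrative): for loads of A[3*i], A[3*i+1],
  // and A[3*i+2] in a stride-3 loop, the bottom-up walk starts a group at the
  // bottom-most access and inserts the other two at indices computed from
  // their constant distances to it, yielding a single factor-3 load group.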
1579
Dorit Nuzman67278b82021-06-17 18:39:09 +03001580 auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
1581 int Index,
Amr Hesham4ba18002024-10-16 10:55:01 +02001582 const char *FirstOrLast) -> bool {
Dorit Nuzman67278b82021-06-17 18:39:09 +03001583 Instruction *Member = Group->getMember(Index);
1584 assert(Member && "Group member does not exist");
1585 Value *MemberPtr = getLoadStorePointerOperand(Member);
Nikita Popov45c46732021-09-11 19:00:37 +02001586 Type *AccessTy = getLoadStoreType(Member);
1587 if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
Philip Reamesf6d110e2022-09-27 15:55:44 -07001588 /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
Dorit Nuzman67278b82021-06-17 18:39:09 +03001589 return false;
1590 LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
1591 << FirstOrLast
1592 << " group member potentially pointer-wrapping.\n");
1593 releaseGroup(Group);
1594 return true;
1595 };
1596
  // Remove interleaved groups with gaps whose memory accesses may wrap around.
  // We have to revisit the getPtrStride analysis, this time with
  // ShouldCheckWrap=true, since collectConstStrideAccesses does not check
  // wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. Once we change to Assume=true, we'll
  // only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist)
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1, "last");
    else {
      // Case 3: A non-reversed interleaved load group with gaps: we need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // An interleaved store group with gaps is implemented using a masked wide
    // store. Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist)
    // and the last group member. Case 3 (scalar epilogue) is not relevant for
    // stores with gaps, which are implemented with a masked store (rather
    // than speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, "last");
        break;
      }
  }
}
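
// For example (illustrative): a factor-2 load group over A[2*i] with no
// member at index 1 has a gap; the analysis above either demands a scalar
// epilogue iteration (for a non-reversed access) or invalidates the group
// (for a reversed access) so the wide load never reads out of bounds.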

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Release groups requiring scalar epilogues. Note that this also removes
  // them from InterleaveGroups.
  bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) {
    if (!Group->requiresScalarEpilogue())
      return false;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
    releaseGroupWithoutRemovingFromSet(Group);
    return true;
  });
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL(make_second_range(Members));
  propagateMetadata(NewInst, VL);
}
} // namespace llvm