//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
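/// For example, llvm.sqrt.f32 has a matching vector form llvm.sqrt.v4f32 that
/// simply applies the same operation lane-wise.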
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs:   // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt:  // Begin floating-point.
  case Intrinsic::asin:
  case Intrinsic::acos:
  case Intrinsic::atan:
  case Intrinsic::atan2:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::sincos:
  case Intrinsic::sincospi:
  case Intrinsic::tan:
  case Intrinsic::sinh:
  case Intrinsic::cosh:
  case Intrinsic::tanh:
  case Intrinsic::exp:
  case Intrinsic::exp10:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::minimumnum:
  case Intrinsic::maximumnum:
  case Intrinsic::modf:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return true;
  default:
    return false;
  }
}

bool llvm::isTriviallyScalarizable(Intrinsic::ID ID,
                                   const TargetTransformInfo *TTI) {
  if (isTriviallyVectorizable(ID))
    return true;

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicTriviallyScalarizable(ID);

  // TODO: Move frexp to isTriviallyVectorizable.
  // https://github.com/llvm/llvm-project/issues/112408
  switch (ID) {
  case Intrinsic::frexp:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    return true;
  }
  return false;
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
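/// For example, the exponent operand of llvm.powi and the scale operand of the
/// fixed-point multiply intrinsics stay scalar in the vector forms.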
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx,
                                              const TargetTransformInfo *TTI) {

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);

  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::vp_abs:
  case Intrinsic::ctlz:
  case Intrinsic::vp_ctlz:
  case Intrinsic::cttz:
  case Intrinsic::vp_cttz:
  case Intrinsic::is_fpclass:
  case Intrinsic::vp_is_fpclass:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  case Intrinsic::experimental_vp_splice:
    return ScalarOpdIdx == 2 || ScalarOpdIdx == 4 || ScalarOpdIdx == 5;
  default:
    return false;
  }
}

bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
    Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI) {
  assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);

  if (VPCastIntrinsic::isVPCast(ID))
    return OpdIdx == -1 || OpdIdx == 0;

  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::vp_lrint:
  case Intrinsic::vp_llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return OpdIdx == -1 || OpdIdx == 0;
  case Intrinsic::modf:
  case Intrinsic::sincos:
  case Intrinsic::sincospi:
  case Intrinsic::is_fpclass:
  case Intrinsic::vp_is_fpclass:
    return OpdIdx == 0;
  case Intrinsic::powi:
    return OpdIdx == -1 || OpdIdx == 1;
  default:
    return OpdIdx == -1;
  }
}

bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField(
    Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI) {

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);

  switch (ID) {
  case Intrinsic::frexp:
    return RetIdx == 0 || RetIdx == 1;
  default:
    return RetIdx == 0;
  }
}

/// Returns the intrinsic ID for the given call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if no mapping is found, it returns Intrinsic::not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
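/// For example, if V is "insertelement <4 x i32> %vec, i32 %x, i32 3" and
/// EltNo is 3, this returns %x.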
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For a fixed-length vector, return poison for an out-of-range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return PoisonValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vectors.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return PoisonValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

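// For example, the mask <3, -1, 3, 3> has a splat index of 3, while <0, 1>
// (or an all-undef mask) has no single splat index and yields -1.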
int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!all_equal(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

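// For example, with SrcWidth == 4 and the mask <0, 5, -1, 2>, demanding all
// result elements demands elements {0, 2} of the LHS and element 1 of the RHS
// (the undef element is skipped when AllowUndefElts is set).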
bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                                  const APInt &DemandedElts, APInt &DemandedLHS,
                                  APInt &DemandedRHS, bool AllowUndefElts) {
  DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);

  // Early out if we don't demand any elements.
  if (DemandedElts.isZero())
    return true;

  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }

  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    int M = Mask[I];
    assert((-1 <= M) && (M < (SrcWidth * 2)) &&
           "Invalid shuffle mask constant");

    if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
      continue;

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M < 0)
      return false;

    if (M < SrcWidth)
      DemandedLHS.setBit(M);
    else
      DemandedRHS.setBit(M - SrcWidth);
  }

  return true;
}

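// Each candidate slide is described by a (source operand, offset) pair, where
// the offset is the difference between the destination index and the source
// element index; every defined mask element must match one of at most two
// such pairs.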
bool llvm::isMaskedSlidePair(ArrayRef<int> Mask, int NumElts,
                             std::array<std::pair<int, int>, 2> &SrcInfo) {
  const int SignalValue = NumElts * 2;
  SrcInfo[0] = {-1, SignalValue};
  SrcInfo[1] = {-1, SignalValue};
  for (auto [i, M] : enumerate(Mask)) {
    if (M < 0)
      continue;
    int Src = M >= (int)NumElts;
    int Diff = (int)i - (M % NumElts);
    bool Match = false;
    for (int j = 0; j < 2; j++) {
      auto &[SrcE, DiffE] = SrcInfo[j];
      if (SrcE == -1) {
        assert(DiffE == SignalValue);
        SrcE = Src;
        DiffE = Diff;
      }
      if (SrcE == Src && DiffE == Diff) {
        Match = true;
        break;
      }
    }
    if (!Match)
      return false;
  }
  // Avoid all undef masks
  return SrcInfo[0].first != -1;
}

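// For example, narrowing the mask <1, -1> by a scale of 2 produces
// <2, 3, -1, -1>: each source mask element expands to Scale consecutive
// elements of the narrow mask.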
void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

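// This is roughly the inverse of narrowShuffleMaskElts: for example, widening
// the mask <2, 3, -1, -1> by a scale of 2 yields <1, -1>, whereas
// <2, 4, -1, -1> cannot be widened because its first slice is not consecutive.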
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal across
      // the entire slice.
      if (!all_equal(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the elements
  // of a mask with wider elements.
  return true;
}

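// Pairwise variant: for example, the mask <-1, 1, 2, -1> widens to <0, 1>,
// since each pair of adjacent elements maps onto a single wider element.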
bool llvm::widenShuffleMaskElts(ArrayRef<int> M,
                                SmallVectorImpl<int> &NewMask) {
  unsigned NumElts = M.size();
  if (NumElts % 2 != 0)
    return false;

  NewMask.clear();
  for (unsigned i = 0; i < NumElts; i += 2) {
    int M0 = M[i];
    int M1 = M[i + 1];

    // If both elements are undef, new mask is undef too.
    if (M0 == -1 && M1 == -1) {
      NewMask.push_back(-1);
      continue;
    }

    if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
      NewMask.push_back(M1 / 2);
      continue;
    }

    if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
      NewMask.push_back(M0 / 2);
      continue;
    }

    NewMask.clear();
    return false;
  }

  assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
  return true;
}

bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  unsigned NumSrcElts = Mask.size();
  assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (NumSrcElts == NumDstElts) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // Ensure we can find a whole scale factor.
  assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
         "Unexpected scaling factor");

  if (NumSrcElts > NumDstElts) {
    int Scale = NumSrcElts / NumDstElts;
    return widenShuffleMaskElts(Scale, Mask, ScaledMask);
  }

  int Scale = NumDstElts / NumSrcElts;
  narrowShuffleMaskElts(Scale, Mask, ScaledMask);
  return true;
}

void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &ScaledMask) {
  std::array<SmallVector<int, 16>, 2> TmpMasks;
  SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
  ArrayRef<int> InputMask = Mask;
  for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
    while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
      InputMask = *Output;
      std::swap(Output, Tmp);
    }
  }
  ScaledMask.assign(InputMask.begin(), InputMask.end());
}

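// For each destination register, processShuffleMasks builds one sub-mask per
// possible source register (2 * NumOfSrcRegs of them, NumOfSrcRegs per shuffle
// operand); an empty sub-mask means that source register is not used.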
void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned, bool)>
        ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  // permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(2 * NumOfSrcRegs, {});
    // Check which source registers the values in this dest register come
    // from.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= 2 * Sz || Mask[Idx] == PoisonMaskElem)
        continue;
      int MaskIdx = Mask[Idx] % Sz;
      int SrcRegIdx = MaskIdx / SzSrc + (Mask[Idx] >= Sz ? NumOfSrcRegs : 0);
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source register.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
      RegMasks[SrcRegIdx][K] = MaskIdx % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I : seq<unsigned>(NumOfUsedRegs)) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the single source register with a non-empty mask.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have >2
      // input registers to shuffle, we merge the masks for the first 2
      // registers and generate a shuffle of 2 registers rather than the
      // reordering of the first register and then shuffle with the second
      // register. Next, generate the shuffles of the resulting register + the
      // remaining registers from the list.
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != PoisonMaskElem) {
            assert(FirstMask[Idx] == PoisonMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != PoisonMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      bool NewReg = true;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I : seq<unsigned>(2 * NumOfSrcRegs)) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx, NewReg);
          NewReg = false;
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx, NewReg);
          NewReg = false;
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}

void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
                                               const APInt &DemandedElts,
                                               APInt &DemandedLHS,
                                               APInt &DemandedRHS) {
  assert(VectorBitWidth >= 128 && "Vectors smaller than 128 bit not supported");
  int NumLanes = VectorBitWidth / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumEltsPerLane = NumElts / NumLanes;
  int HalfEltsPerLane = NumEltsPerLane / 2;

  DemandedLHS = APInt::getZero(NumElts);
  DemandedRHS = APInt::getZero(NumElts);

  // Map DemandedElts to the horizontal operands.
  for (int Idx = 0; Idx != NumElts; ++Idx) {
    if (!DemandedElts[Idx])
      continue;
    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
    int LocalIdx = Idx % NumEltsPerLane;
    if (LocalIdx < HalfEltsPerLane) {
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx);
    } else {
      LocalIdx -= HalfEltsPerLane;
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx);
    }
  }
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    // Don't modify the types of operands of a call, as doing that would cause a
    // signature mismatch.
    if (isa<CallBase>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (const auto &E : ECs) {
    if (!E->isLeader())
      continue;
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : ECs.members(*E))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
    // Round up to a power of 2
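    // (e.g. if the highest demanded bit is bit 19, MinBW is 20 and is rounded
    // up to 32).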
Kazu Hirata | 526966d | 2023-01-28 16:13:09 -0800 | [diff] [blame] | 862 | MinBW = llvm::bit_ceil(MinBW); |
James Molloy | 8e46cd0 | 2016-03-30 10:11:43 +0000 | [diff] [blame] | 863 | |
| 864 | // We don't modify the types of PHIs. Reductions will already have been |
| 865 | // truncated if possible, and inductions' sizes will have been chosen by |
| 866 | // indvars. |
| 867 | // If we are required to shrink a PHI, abandon this entire equivalence class. |
| 868 | bool Abort = false; |
Ramkumar Ramachandra | fd6260f | 2025-04-04 14:34:08 +0100 | [diff] [blame] | 869 | for (Value *M : ECs.members(*E)) |
Kazu Hirata | 896d0e1 | 2021-02-22 20:17:18 -0800 | [diff] [blame] | 870 | if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) { |
James Molloy | 8e46cd0 | 2016-03-30 10:11:43 +0000 | [diff] [blame] | 871 | Abort = true; |
| 872 | break; |
| 873 | } |
| 874 | if (Abort) |
| 875 | continue; |
| 876 | |
Ramkumar Ramachandra | fd6260f | 2025-04-04 14:34:08 +0100 | [diff] [blame] | 877 | for (Value *M : ECs.members(*E)) { |
Florian Hahn | d7e79bd | 2023-07-11 20:18:55 +0100 | [diff] [blame] | 878 | auto *MI = dyn_cast<Instruction>(M); |
| 879 | if (!MI) |
James Molloy | 55d633b | 2015-10-12 12:34:45 +0000 | [diff] [blame] | 880 | continue; |
Kazu Hirata | 896d0e1 | 2021-02-22 20:17:18 -0800 | [diff] [blame] | 881 | Type *Ty = M->getType(); |
| 882 | if (Roots.count(M)) |
Florian Hahn | d7e79bd | 2023-07-11 20:18:55 +0100 | [diff] [blame] | 883 | Ty = MI->getOperand(0)->getType(); |
| 884 | |
| 885 | if (MinBW >= Ty->getScalarSizeInBits()) |
| 886 | continue; |
| 887 | |
| 888 | // If any of M's operands demand more bits than MinBW then M cannot be |
| 889 | // performed safely in MinBW. |
Ramkumar Ramachandra | 4984242 | 2025-04-29 09:47:38 +0100 | [diff] [blame^] | 890 | auto *Call = dyn_cast<CallBase>(MI); |
| 891 | auto Ops = Call ? Call->args() : MI->operands(); |
| 892 | if (any_of(Ops, [&DB, MinBW](Use &U) { |
Florian Hahn | d7e79bd | 2023-07-11 20:18:55 +0100 | [diff] [blame] | 893 | auto *CI = dyn_cast<ConstantInt>(U); |
| 894 | // For constants shift amounts, check if the shift would result in |
| 895 | // poison. |
| 896 | if (CI && |
| 897 | isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) && |
| 898 | U.getOperandNo() == 1) |
| 899 | return CI->uge(MinBW); |
| 900 | uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue()); |
| 901 | return bit_ceil(BW) > MinBW; |
| 902 | })) |
| 903 | continue; |
| 904 | |
| 905 | MinBWs[MI] = MinBW; |
James Molloy | 55d633b | 2015-10-12 12:34:45 +0000 | [diff] [blame] | 906 | } |
| 907 | } |
| 908 | |
| 909 | return MinBWs; |
| 910 | } |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 911 | |
Michael Kruse | 978ba61 | 2018-12-20 04:58:07 +0000 | [diff] [blame] | 912 | /// Add all access groups in @p AccGroups to @p List. |
| 913 | template <typename ListT> |
| 914 | static void addToAccessGroupList(ListT &List, MDNode *AccGroups) { |
| 915 | // Interpret an access group as a list containing itself. |
| 916 | if (AccGroups->getNumOperands() == 0) { |
| 917 | assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group"); |
| 918 | List.insert(AccGroups); |
| 919 | return; |
| 920 | } |
| 921 | |
Kazu Hirata | 601b3a1 | 2022-07-16 23:26:34 -0700 | [diff] [blame] | 922 | for (const auto &AccGroupListOp : AccGroups->operands()) { |
Michael Kruse | 978ba61 | 2018-12-20 04:58:07 +0000 | [diff] [blame] | 923 | auto *Item = cast<MDNode>(AccGroupListOp.get()); |
| 924 | assert(isValidAsAccessGroup(Item) && "List item must be an access group"); |
| 925 | List.insert(Item); |
| 926 | } |
Clement Courbet | d4bd3eb | 2018-12-20 09:20:07 +0000 | [diff] [blame] | 927 | } |
Michael Kruse | 978ba61 | 2018-12-20 04:58:07 +0000 | [diff] [blame] | 928 | |
| 929 | MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) { |
| 930 | if (!AccGroups1) |
| 931 | return AccGroups2; |
| 932 | if (!AccGroups2) |
| 933 | return AccGroups1; |
| 934 | if (AccGroups1 == AccGroups2) |
| 935 | return AccGroups1; |
| 936 | |
| 937 | SmallSetVector<Metadata *, 4> Union; |
| 938 | addToAccessGroupList(Union, AccGroups1); |
| 939 | addToAccessGroupList(Union, AccGroups2); |
| 940 | |
| 941 | if (Union.size() == 0) |
| 942 | return nullptr; |
| 943 | if (Union.size() == 1) |
| 944 | return cast<MDNode>(Union.front()); |
| 945 | |
| 946 | LLVMContext &Ctx = AccGroups1->getContext(); |
| 947 | return MDNode::get(Ctx, Union.getArrayRef()); |
| 948 | } |
| 949 | |
| 950 | MDNode *llvm::intersectAccessGroups(const Instruction *Inst1, |
| 951 | const Instruction *Inst2) { |
| 952 | bool MayAccessMem1 = Inst1->mayReadOrWriteMemory(); |
| 953 | bool MayAccessMem2 = Inst2->mayReadOrWriteMemory(); |
| 954 | |
| 955 | if (!MayAccessMem1 && !MayAccessMem2) |
| 956 | return nullptr; |
| 957 | if (!MayAccessMem1) |
| 958 | return Inst2->getMetadata(LLVMContext::MD_access_group); |
| 959 | if (!MayAccessMem2) |
| 960 | return Inst1->getMetadata(LLVMContext::MD_access_group); |
| 961 | |
| 962 | MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group); |
| 963 | MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group); |
| 964 | if (!MD1 || !MD2) |
| 965 | return nullptr; |
| 966 | if (MD1 == MD2) |
| 967 | return MD1; |
| 968 | |
| 969 | // Use set for scalable 'contains' check. |
| 970 | SmallPtrSet<Metadata *, 4> AccGroupSet2; |
| 971 | addToAccessGroupList(AccGroupSet2, MD2); |
| 972 | |
| 973 | SmallVector<Metadata *, 4> Intersection; |
| 974 | if (MD1->getNumOperands() == 0) { |
| 975 | assert(isValidAsAccessGroup(MD1) && "Node must be an access group"); |
| 976 | if (AccGroupSet2.count(MD1)) |
| 977 | Intersection.push_back(MD1); |
| 978 | } else { |
| 979 | for (const MDOperand &Node : MD1->operands()) { |
| 980 | auto *Item = cast<MDNode>(Node.get()); |
| 981 | assert(isValidAsAccessGroup(Item) && "List item must be an access group"); |
| 982 | if (AccGroupSet2.count(Item)) |
| 983 | Intersection.push_back(Item); |
| 984 | } |
| 985 | } |
| 986 | |
| 987 | if (Intersection.size() == 0) |
| 988 | return nullptr; |
| 989 | if (Intersection.size() == 1) |
| 990 | return cast<MDNode>(Intersection.front()); |
| 991 | |
| 992 | LLVMContext &Ctx = Inst1->getContext(); |
| 993 | return MDNode::get(Ctx, Intersection); |
| 994 | } |
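
// A minimal sketch, assuming LoadA and LoadB are existing memory instructions
// and NewLoad replaces them: the intersection keeps only the access groups
// both instructions belong to, which is the set that remains valid for the
// combined access.
//
// \code
//   if (MDNode *Common = intersectAccessGroups(LoadA, LoadB))
//     NewLoad->setMetadata(LLVMContext::MD_access_group, Common);
// \endcode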
| 995 | |
Florian Hahn | 0fd81e5 | 2025-04-09 21:35:46 +0100 | [diff] [blame] | 996 | /// Add metadata from \p Inst to \p Metadata, if it can be preserved after |
| 997 | /// vectorization. |
Florian Hahn | 7cbf78e | 2025-04-09 22:03:43 +0100 | [diff] [blame] | 998 | void llvm::getMetadataToPropagate( |
Florian Hahn | 0fd81e5 | 2025-04-09 21:35:46 +0100 | [diff] [blame] | 999 | Instruction *Inst, |
| 1000 | SmallVectorImpl<std::pair<unsigned, MDNode *>> &Metadata) { |
| 1001 | Inst->getAllMetadataOtherThanDebugLoc(Metadata); |
| 1002 | static const unsigned SupportedIDs[] = { |
| 1003 | LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, |
| 1004 | LLVMContext::MD_noalias, LLVMContext::MD_fpmath, |
| 1005 | LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load, |
| 1006 | LLVMContext::MD_access_group, LLVMContext::MD_mmra}; |
| 1007 | |
| 1008 | // Remove any unsupported metadata kinds from Metadata. |
| 1009 | for (unsigned Idx = 0; Idx != Metadata.size();) { |
| 1010 | if (is_contained(SupportedIDs, Metadata[Idx].first)) { |
| 1011 | ++Idx; |
| 1012 | } else { |
| 1013 | // Swap element to end and remove it. |
| 1014 | std::swap(Metadata[Idx], Metadata.back()); |
| 1015 | Metadata.pop_back(); |
| 1016 | } |
| 1017 | } |
| 1018 | } |
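
// A minimal usage sketch, assuming ScalarInst is one of the instructions being
// widened and WideInst is its vector replacement: only the kinds listed in
// SupportedIDs survive the filtering.
//
// \code
//   SmallVector<std::pair<unsigned, MDNode *>> MDs;
//   getMetadataToPropagate(ScalarInst, MDs);
//   for (const auto &[KindID, Node] : MDs)
//     WideInst->setMetadata(KindID, Node);
// \endcode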
| 1019 | |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1020 | /// \returns \p Inst after propagating metadata from \p VL. 
| 1021 | Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) { |
Krzysztof Parzyszek | 50cf0a1 | 2021-05-07 12:52:20 -0500 | [diff] [blame] | 1022 | if (VL.empty()) |
| 1023 | return Inst; |
Florian Hahn | 0fd81e5 | 2025-04-09 21:35:46 +0100 | [diff] [blame] | 1024 | SmallVector<std::pair<unsigned, MDNode *>> Metadata; |
| 1025 | getMetadataToPropagate(cast<Instruction>(VL[0]), Metadata); |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1026 | |
Florian Hahn | 0fd81e5 | 2025-04-09 21:35:46 +0100 | [diff] [blame] | 1027 | for (auto &[Kind, MD] : Metadata) { |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1028 | for (int J = 1, E = VL.size(); MD && J != E; ++J) { |
| 1029 | const Instruction *IJ = cast<Instruction>(VL[J]); |
| 1030 | MDNode *IMD = IJ->getMetadata(Kind); |
Pierre van Houtryve | cf328ff | 2024-04-24 08:52:25 +0200 | [diff] [blame] | 1031 | |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1032 | switch (Kind) { |
Pierre van Houtryve | cf328ff | 2024-04-24 08:52:25 +0200 | [diff] [blame] | 1033 | case LLVMContext::MD_mmra: { |
| 1034 | MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD); |
| 1035 | break; |
| 1036 | } |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1037 | case LLVMContext::MD_tbaa: |
| 1038 | MD = MDNode::getMostGenericTBAA(MD, IMD); |
| 1039 | break; |
| 1040 | case LLVMContext::MD_alias_scope: |
| 1041 | MD = MDNode::getMostGenericAliasScope(MD, IMD); |
| 1042 | break; |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1043 | case LLVMContext::MD_fpmath: |
| 1044 | MD = MDNode::getMostGenericFPMath(MD, IMD); |
| 1045 | break; |
Justin Lebar | 11a3204 | 2016-09-11 01:39:08 +0000 | [diff] [blame] | 1046 | case LLVMContext::MD_noalias: |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1047 | case LLVMContext::MD_nontemporal: |
Justin Lebar | 11a3204 | 2016-09-11 01:39:08 +0000 | [diff] [blame] | 1048 | case LLVMContext::MD_invariant_load: |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1049 | MD = MDNode::intersect(MD, IMD); |
| 1050 | break; |
Michael Kruse | 978ba61 | 2018-12-20 04:58:07 +0000 | [diff] [blame] | 1051 | case LLVMContext::MD_access_group: |
| 1052 | MD = intersectAccessGroups(Inst, IJ); |
| 1053 | break; |
Matt Arsenault | 727e279 | 2016-06-30 21:17:59 +0000 | [diff] [blame] | 1054 | default: |
| 1055 | llvm_unreachable("unhandled metadata"); |
| 1056 | } |
| 1057 | } |
| 1058 | |
| 1059 | Inst->setMetadata(Kind, MD); |
| 1060 | } |
| 1061 | |
| 1062 | return Inst; |
| 1063 | } |
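
// A minimal sketch, assuming VecLoad is the wide load created for the scalar
// loads Load0..Load3: metadata kinds that are safe to merge are intersected
// or combined across the bundle and attached to the new instruction.
//
// \code
//   SmallVector<Value *, 4> Bundle = {Load0, Load1, Load2, Load3};
//   propagateMetadata(VecLoad, Bundle);
// \endcode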
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1064 | |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1065 | Constant * |
Nikita Popov | f37e899 | 2020-02-17 21:59:46 +0100 | [diff] [blame] | 1066 | llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF, |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1067 | const InterleaveGroup<Instruction> &Group) { |
Dorit Nuzman | 34da6dd | 2018-10-31 09:57:56 +0000 | [diff] [blame] | 1068 | // All 1's means mask is not needed. |
| 1069 | if (Group.getNumMembers() == Group.getFactor()) |
| 1070 | return nullptr; |
| 1071 | |
| 1072 | // TODO: support reversed access. |
| 1073 | assert(!Group.isReverse() && "Reversed group not supported."); |
| 1074 | |
| 1075 | SmallVector<Constant *, 16> Mask; |
| 1076 | for (unsigned i = 0; i < VF; i++) |
| 1077 | for (unsigned j = 0; j < Group.getFactor(); ++j) { |
| 1078 | unsigned HasMember = Group.getMember(j) ? 1 : 0; |
| 1079 | Mask.push_back(Builder.getInt1(HasMember)); |
| 1080 | } |
| 1081 | |
| 1082 | return ConstantVector::get(Mask); |
| 1083 | } |
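
// A worked sketch, assuming Builder and Group already exist: for a group with
// factor 4 whose only members sit at indices 0 and 2, and VF == 2, the result
// is <i1 1, i1 0, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0> -- one bit per member
// slot, repeated VF times.
//
// \code
//   Constant *GapMask = createBitMaskForGaps(Builder, /*VF=*/2, Group);
// \endcode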
| 1084 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1085 | llvm::SmallVector<int, 16> |
| 1086 | llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) { |
| 1087 | SmallVector<int, 16> MaskVec; |
Dorit Nuzman | 38bbf81 | 2018-10-14 08:50:06 +0000 | [diff] [blame] | 1088 | for (unsigned i = 0; i < VF; i++) |
| 1089 | for (unsigned j = 0; j < ReplicationFactor; j++) |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1090 | MaskVec.push_back(i); |
Dorit Nuzman | 38bbf81 | 2018-10-14 08:50:06 +0000 | [diff] [blame] | 1091 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1092 | return MaskVec; |
Dorit Nuzman | 38bbf81 | 2018-10-14 08:50:06 +0000 | [diff] [blame] | 1093 | } |
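
// A worked sketch: replicating each of 2 lanes 3 times produces the shuffle
// mask <0, 0, 0, 1, 1, 1>.
//
// \code
//   SmallVector<int, 16> RepMask =
//       createReplicatedMask(/*ReplicationFactor=*/3, /*VF=*/2);
//   // RepMask == {0, 0, 0, 1, 1, 1}
// \endcode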
| 1094 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1095 | llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF, |
| 1096 | unsigned NumVecs) { |
| 1097 | SmallVector<int, 16> Mask; |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1098 | for (unsigned i = 0; i < VF; i++) |
| 1099 | for (unsigned j = 0; j < NumVecs; j++) |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1100 | Mask.push_back(j * VF + i); |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1101 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1102 | return Mask; |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1103 | } |
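
// A worked sketch: interleaving 2 vectors of 4 lanes produces
// <0, 4, 1, 5, 2, 6, 3, 7>, i.e. lane i of each source in turn.
//
// \code
//   SmallVector<int, 16> IMask = createInterleaveMask(/*VF=*/4, /*NumVecs=*/2);
//   // IMask == {0, 4, 1, 5, 2, 6, 3, 7}
// \endcode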
| 1104 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1105 | llvm::SmallVector<int, 16> |
| 1106 | llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) { |
| 1107 | SmallVector<int, 16> Mask; |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1108 | for (unsigned i = 0; i < VF; i++) |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1109 | Mask.push_back(Start + i * Stride); |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1110 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1111 | return Mask; |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1112 | } |
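
// A worked sketch: starting at lane 0 with stride 2 over 4 lanes produces
// <0, 2, 4, 6>.
//
// \code
//   SmallVector<int, 16> SMask =
//       createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4);
//   // SMask == {0, 2, 4, 6}
// \endcode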
| 1113 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1114 | llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start, |
| 1115 | unsigned NumInts, |
| 1116 | unsigned NumUndefs) { |
| 1117 | SmallVector<int, 16> Mask; |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1118 | for (unsigned i = 0; i < NumInts; i++) |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1119 | Mask.push_back(Start + i); |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1120 | |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1121 | for (unsigned i = 0; i < NumUndefs; i++) |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1122 | Mask.push_back(-1); |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1123 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1124 | return Mask; |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1125 | } |
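
// A worked sketch: 3 sequential lanes starting at 2, padded with one undef
// (-1) element, produces <2, 3, 4, -1>.
//
// \code
//   SmallVector<int, 16> SeqMask =
//       createSequentialMask(/*Start=*/2, /*NumInts=*/3, /*NumUndefs=*/1);
//   // SeqMask == {2, 3, 4, -1}
// \endcode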
| 1126 | |
Sanjay Patel | 2a3cc4d | 2021-10-18 08:43:01 -0400 | [diff] [blame] | 1127 | llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask, |
| 1128 | unsigned NumElts) { |
| 1129 | // Avoid casts in the loop and make sure we have a reasonable number. |
| 1130 | int NumEltsSigned = NumElts; |
| 1131 | assert(NumEltsSigned > 0 && "Expected a positive element count"); 
| 1132 | |
| 1133 | // If the mask chooses an element from operand 1, reduce it to choose from the |
| 1134 | // corresponding element of operand 0. Undef mask elements are unchanged. |
| 1135 | SmallVector<int, 16> UnaryMask; |
| 1136 | for (int MaskElt : Mask) { |
| 1137 | assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask"); |
| 1138 | int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt; |
| 1139 | UnaryMask.push_back(UnaryElt); |
| 1140 | } |
| 1141 | return UnaryMask; |
| 1142 | } |
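
// A worked sketch: with 4 elements per operand, mask lanes that select from
// operand 1 are remapped onto operand 0.
//
// \code
//   int TwoOpMask[] = {1, 5, 2, 7};
//   SmallVector<int, 16> Unary = createUnaryMask(TwoOpMask, /*NumElts=*/4);
//   // Unary == {1, 1, 2, 3}
// \endcode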
| 1143 | |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1144 | /// A helper function for concatenating vectors. This function concatenates two |
| 1145 | /// vectors having the same element type. If the second vector has fewer |
| 1146 | /// elements than the first, it is padded with undefs. |
Nikita Popov | f37e899 | 2020-02-17 21:59:46 +0100 | [diff] [blame] | 1147 | static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1, |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1148 | Value *V2) { |
| 1149 | VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType()); |
| 1150 | VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType()); |
| 1151 | assert(VecTy1 && VecTy2 && |
| 1152 | VecTy1->getScalarType() == VecTy2->getScalarType() && |
| 1153 | "Expect two vectors with the same element type"); |
| 1154 | |
Christopher Tetreault | 23c5e59 | 2020-07-22 14:36:48 -0700 | [diff] [blame] | 1155 | unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements(); |
| 1156 | unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements(); |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1157 | assert(NumElts1 >= NumElts2 && "First vector must have at least as many elements"); 
| 1158 | |
| 1159 | if (NumElts1 > NumElts2) { |
| 1160 | // Extend with UNDEFs. |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1161 | V2 = Builder.CreateShuffleVector( |
Juneyoung Lee | 9b29610 | 2020-12-30 07:28:17 +0900 | [diff] [blame] | 1162 | V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2)); |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1163 | } |
| 1164 | |
Benjamin Kramer | 166467e | 2020-04-17 15:28:00 +0200 | [diff] [blame] | 1165 | return Builder.CreateShuffleVector( |
| 1166 | V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0)); |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1167 | } |
| 1168 | |
Nikita Popov | f37e899 | 2020-02-17 21:59:46 +0100 | [diff] [blame] | 1169 | Value *llvm::concatenateVectors(IRBuilderBase &Builder, |
| 1170 | ArrayRef<Value *> Vecs) { |
Matthew Simpson | ba5cf9d | 2017-02-01 17:45:46 +0000 | [diff] [blame] | 1171 | unsigned NumVecs = Vecs.size(); |
| 1172 | assert(NumVecs > 1 && "Should be at least two vectors"); |
| 1173 | |
| 1174 | SmallVector<Value *, 8> ResList; |
| 1175 | ResList.append(Vecs.begin(), Vecs.end()); |
| 1176 | do { |
| 1177 | SmallVector<Value *, 8> TmpList; |
| 1178 | for (unsigned i = 0; i < NumVecs - 1; i += 2) { |
| 1179 | Value *V0 = ResList[i], *V1 = ResList[i + 1]; |
| 1180 | assert((V0->getType() == V1->getType() || i == NumVecs - 2) && |
| 1181 | "Only the last vector may have a different type"); |
| 1182 | |
| 1183 | TmpList.push_back(concatenateTwoVectors(Builder, V0, V1)); |
| 1184 | } |
| 1185 | |
| 1186 | // Push the last vector if the total number of vectors is odd. |
| 1187 | if (NumVecs % 2 != 0) |
| 1188 | TmpList.push_back(ResList[NumVecs - 1]); |
| 1189 | |
| 1190 | ResList = TmpList; |
| 1191 | NumVecs = ResList.size(); |
| 1192 | } while (NumVecs > 1); |
| 1193 | |
| 1194 | return ResList[0]; |
| 1195 | } |
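
// A minimal sketch, assuming Builder is an existing IRBuilder<> and V0, V1, V2
// are vector values with the same element type (only the last one may be
// narrower): the inputs are concatenated pairwise, padding a shorter trailing
// vector with undef lanes first.
//
// \code
//   Value *Wide = concatenateVectors(Builder, {V0, V1, V2});
// \endcode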
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1196 | |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1197 | bool llvm::maskIsAllZeroOrUndef(Value *Mask) { |
Christopher Tetreault | 7ddfd9b | 2020-09-10 11:29:16 -0700 | [diff] [blame] | 1198 | assert(isa<VectorType>(Mask->getType()) && |
| 1199 | isa<IntegerType>(Mask->getType()->getScalarType()) && |
| 1200 | cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() == |
| 1201 | 1 && |
| 1202 | "Mask must be a vector of i1"); |
| 1203 | |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1204 | auto *ConstMask = dyn_cast<Constant>(Mask); |
| 1205 | if (!ConstMask) |
| 1206 | return false; |
| 1207 | if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask)) |
| 1208 | return true; |
Christopher Tetreault | 7ddfd9b | 2020-09-10 11:29:16 -0700 | [diff] [blame] | 1209 | if (isa<ScalableVectorType>(ConstMask->getType())) |
| 1210 | return false; |
Christopher Tetreault | 23c5e59 | 2020-07-22 14:36:48 -0700 | [diff] [blame] | 1211 | for (unsigned |
| 1212 | I = 0, |
| 1213 | E = cast<FixedVectorType>(ConstMask->getType())->getNumElements(); |
Christopher Tetreault | b96558f | 2020-04-09 12:19:23 -0700 | [diff] [blame] | 1214 | I != E; ++I) { |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1215 | if (auto *MaskElt = ConstMask->getAggregateElement(I)) |
| 1216 | if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt)) |
| 1217 | continue; |
| 1218 | return false; |
| 1219 | } |
| 1220 | return true; |
| 1221 | } |
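
// A minimal sketch, assuming Ctx is an existing LLVMContext: a constant
// <i1 0, i1 undef> mask is recognized as all-zero-or-undef, so a masked
// operation using it can be dropped.
//
// \code
//   Type *I1Ty = Type::getInt1Ty(Ctx);
//   Constant *M = ConstantVector::get(
//       {ConstantInt::getFalse(I1Ty), UndefValue::get(I1Ty)});
//   bool Dead = maskIsAllZeroOrUndef(M);   // true
// \endcode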
| 1222 | |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1223 | bool llvm::maskIsAllOneOrUndef(Value *Mask) { |
Christopher Tetreault | 7ddfd9b | 2020-09-10 11:29:16 -0700 | [diff] [blame] | 1224 | assert(isa<VectorType>(Mask->getType()) && |
| 1225 | isa<IntegerType>(Mask->getType()->getScalarType()) && |
| 1226 | cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() == |
| 1227 | 1 && |
| 1228 | "Mask must be a vector of i1"); |
| 1229 | |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1230 | auto *ConstMask = dyn_cast<Constant>(Mask); |
| 1231 | if (!ConstMask) |
| 1232 | return false; |
| 1233 | if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask)) |
| 1234 | return true; |
Christopher Tetreault | 7ddfd9b | 2020-09-10 11:29:16 -0700 | [diff] [blame] | 1235 | if (isa<ScalableVectorType>(ConstMask->getType())) |
| 1236 | return false; |
Christopher Tetreault | 23c5e59 | 2020-07-22 14:36:48 -0700 | [diff] [blame] | 1237 | for (unsigned |
| 1238 | I = 0, |
| 1239 | E = cast<FixedVectorType>(ConstMask->getType())->getNumElements(); |
Christopher Tetreault | b96558f | 2020-04-09 12:19:23 -0700 | [diff] [blame] | 1240 | I != E; ++I) { |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1241 | if (auto *MaskElt = ConstMask->getAggregateElement(I)) |
| 1242 | if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt)) |
| 1243 | continue; |
| 1244 | return false; |
| 1245 | } |
| 1246 | return true; |
| 1247 | } |
| 1248 | |
Yingwei Zheng | a1a590e | 2024-03-05 22:34:04 +0800 | [diff] [blame] | 1249 | bool llvm::maskContainsAllOneOrUndef(Value *Mask) { |
| 1250 | assert(isa<VectorType>(Mask->getType()) && |
| 1251 | isa<IntegerType>(Mask->getType()->getScalarType()) && |
| 1252 | cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() == |
| 1253 | 1 && |
| 1254 | "Mask must be a vector of i1"); |
| 1255 | |
| 1256 | auto *ConstMask = dyn_cast<Constant>(Mask); |
| 1257 | if (!ConstMask) |
| 1258 | return false; |
| 1259 | if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask)) |
| 1260 | return true; |
| 1261 | if (isa<ScalableVectorType>(ConstMask->getType())) |
| 1262 | return false; |
| 1263 | for (unsigned |
| 1264 | I = 0, |
| 1265 | E = cast<FixedVectorType>(ConstMask->getType())->getNumElements(); |
| 1266 | I != E; ++I) { |
| 1267 | if (auto *MaskElt = ConstMask->getAggregateElement(I)) |
| 1268 | if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt)) |
| 1269 | return true; |
| 1270 | } |
| 1271 | return false; |
| 1272 | } |
| 1273 | |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1274 | /// TODO: This is a lot like known bits, but for |
| 1275 | /// vectors. Is there something we can share this with? 
| 1276 | APInt llvm::possiblyDemandedEltsInMask(Value *Mask) { |
Christopher Tetreault | 7ddfd9b | 2020-09-10 11:29:16 -0700 | [diff] [blame] | 1277 | assert(isa<FixedVectorType>(Mask->getType()) && |
| 1278 | isa<IntegerType>(Mask->getType()->getScalarType()) && |
| 1279 | cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() == |
| 1280 | 1 && |
| 1281 | "Mask must be a fixed width vector of i1"); |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1282 | |
Christopher Tetreault | 23c5e59 | 2020-07-22 14:36:48 -0700 | [diff] [blame] | 1283 | const unsigned VWidth = |
| 1284 | cast<FixedVectorType>(Mask->getType())->getNumElements(); |
Chris Lattner | 735f467 | 2021-09-08 22:13:13 -0700 | [diff] [blame] | 1285 | APInt DemandedElts = APInt::getAllOnes(VWidth); |
Philip Reames | 88cd69b | 2019-04-25 02:30:17 +0000 | [diff] [blame] | 1286 | if (auto *CV = dyn_cast<ConstantVector>(Mask)) |
| 1287 | for (unsigned i = 0; i < VWidth; i++) |
| 1288 | if (CV->getAggregateElement(i)->isNullValue()) |
| 1289 | DemandedElts.clearBit(i); |
| 1290 | return DemandedElts; |
| 1291 | } |
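
// A worked sketch, assuming M is the constant mask <i1 1, i1 0, i1 1, i1 1>:
// only lane 1 is known inactive, so the returned APInt is 0b1101.
//
// \code
//   APInt Demanded = possiblyDemandedEltsInMask(M);
//   // Demanded == APInt(4, 0b1101)
// \endcode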
| 1292 | |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1293 | bool InterleavedAccessInfo::isStrided(int Stride) { |
| 1294 | unsigned Factor = std::abs(Stride); |
| 1295 | return Factor >= 2 && Factor <= MaxInterleaveGroupFactor; |
| 1296 | } |
| 1297 | |
| 1298 | void InterleavedAccessInfo::collectConstStrideAccesses( |
| 1299 | MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo, |
Philip Reames | e41dce4 | 2023-05-11 09:47:37 -0700 | [diff] [blame] | 1300 | const DenseMap<Value*, const SCEV*> &Strides) { |
Nikita Popov | 2d209d9 | 2024-06-27 16:38:15 +0200 | [diff] [blame] | 1301 | auto &DL = TheLoop->getHeader()->getDataLayout(); |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1302 | |
| 1303 | // Since it's desired that the load/store instructions be maintained in |
| 1304 | // "program order" for the interleaved access analysis, we have to visit the |
| 1305 | // blocks in the loop in reverse postorder (i.e., in a topological order). |
| 1306 | // Such an ordering will ensure that any load/store that may be executed |
| 1307 | // before a second load/store will precede the second load/store in |
| 1308 | // AccessStrideInfo. |
| 1309 | LoopBlocksDFS DFS(TheLoop); |
| 1310 | DFS.perform(LI); |
| 1311 | for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) |
| 1312 | for (auto &I : *BB) { |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1313 | Value *Ptr = getLoadStorePointerOperand(&I); |
Arthur Eubanks | cc64ece | 2021-05-14 14:01:05 -0700 | [diff] [blame] | 1314 | if (!Ptr) |
| 1315 | continue; |
| 1316 | Type *ElementTy = getLoadStoreType(&I); |
| 1317 | |
Florian Hahn | 758699c | 2022-11-13 22:05:37 +0000 | [diff] [blame] | 1318 | // Currently, codegen doesn't support cases where the type size doesn't |
| 1319 | // match the alloc size. Skip them for now. |
| 1320 | uint64_t Size = DL.getTypeAllocSize(ElementTy); |
| 1321 | if (Size * 8 != DL.getTypeSizeInBits(ElementTy)) |
| 1322 | continue; |
| 1323 | |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1324 | // We don't check wrapping here because we don't know yet if Ptr will be |
| 1325 | // part of a full group or a group with gaps. Checking wrapping for all |
| 1326 | // pointers (even those that end up in groups with no gaps) will be overly |
| 1327 | // conservative. For full groups, wrapping should be ok since if we would |
| 1328 | // wrap around the address space we would do a memory access at nullptr |
| 1329 | // even without the transformation. The wrapping checks are therefore |
| 1330 | // deferred until after we've formed the interleaved groups. |
Philip Reames | f6d110e | 2022-09-27 15:55:44 -0700 | [diff] [blame] | 1331 | int64_t Stride = |
| 1332 | getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides, |
| 1333 | /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0); |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1334 | |
| 1335 | const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr); |
Nikita Popov | 52e98f6 | 2020-05-17 22:14:42 +0200 | [diff] [blame] | 1336 | AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size, |
| 1337 | getLoadStoreAlignment(&I)); |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1338 | } |
| 1339 | } |
| 1340 | |
| 1341 | // Analyze interleaved accesses and collect them into interleaved load and |
| 1342 | // store groups. |
| 1343 | // |
| 1344 | // When generating code for an interleaved load group, we effectively hoist all |
| 1345 | // loads in the group to the location of the first load in program order. When |
| 1346 | // generating code for an interleaved store group, we sink all stores to the |
| 1347 | // location of the last store. This code motion can change the order of load |
| 1348 | // and store instructions and may break dependences. |
| 1349 | // |
| 1350 | // The code generation strategy mentioned above ensures that we won't violate |
| 1351 | // any write-after-read (WAR) dependences. |
| 1352 | // |
| 1353 | // E.g., for the WAR dependence: a = A[i]; // (1) |
| 1354 | // A[i] = b; // (2) |
| 1355 | // |
| 1356 | // The store group of (2) is always inserted at or below (2), and the load |
| 1357 | // group of (1) is always inserted at or above (1). Thus, the instructions will |
| 1358 | // never be reordered. All other dependences are checked to ensure the |
| 1359 | // correctness of the instruction reordering. |
| 1360 | // |
| 1361 | // The algorithm visits all memory accesses in the loop in bottom-up program |
| 1362 | // order. Program order is established by traversing the blocks in the loop in |
| 1363 | // reverse postorder when collecting the accesses. |
| 1364 | // |
| 1365 | // We visit the memory accesses in bottom-up order because it can simplify the |
| 1366 | // construction of store groups in the presence of write-after-write (WAW) |
| 1367 | // dependences. |
| 1368 | // |
| 1369 | // E.g., for the WAW dependence: A[i] = a; // (1) |
| 1370 | // A[i] = b; // (2) |
| 1371 | // A[i + 1] = c; // (3) |
| 1372 | // |
| 1373 | // We will first create a store group with (3) and (2). (1) can't be added to |
| 1374 | // this group because it and (2) are dependent. However, (1) can be grouped |
| 1375 | // with other accesses that may precede it in program order. Note that a |
| 1376 | // bottom-up order does not imply that WAW dependences should not be checked. |
Dorit Nuzman | 38bbf81 | 2018-10-14 08:50:06 +0000 | [diff] [blame] | 1377 | void InterleavedAccessInfo::analyzeInterleaving( |
| 1378 | bool EnablePredicatedInterleavedMemAccesses) { |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1379 | LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n"); |
Philip Reames | e41dce4 | 2023-05-11 09:47:37 -0700 | [diff] [blame] | 1380 | const auto &Strides = LAI->getSymbolicStrides(); |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1381 | |
| 1382 | // Holds all accesses with a constant stride. |
| 1383 | MapVector<Instruction *, StrideDescriptor> AccessStrideInfo; |
| 1384 | collectConstStrideAccesses(AccessStrideInfo, Strides); |
| 1385 | |
| 1386 | if (AccessStrideInfo.empty()) |
| 1387 | return; |
| 1388 | |
| 1389 | // Collect the dependences in the loop. |
| 1390 | collectDependences(); |
| 1391 | |
| 1392 | // Holds all interleaved store groups temporarily. |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1393 | SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups; |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1394 | // Holds all interleaved load groups temporarily. |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1395 | SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups; |
Florian Hahn | 4d847bf | 2023-07-07 11:06:30 +0100 | [diff] [blame] | 1396 | // Groups added to this set cannot have new members added. |
| 1397 | SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups; |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1398 | |
| 1399 | // Search in bottom-up program order for pairs of accesses (A and B) that can |
| 1400 | // form interleaved load or store groups. In the algorithm below, access A |
| 1401 | // precedes access B in program order. We initialize a group for B in the |
| 1402 | // outer loop of the algorithm, and then in the inner loop, we attempt to |
| 1403 | // insert each A into B's group if: |
| 1404 | // |
| 1405 | // 1. A and B have the same stride, |
| 1406 | // 2. A and B have the same memory object size, and |
| 1407 | // 3. A belongs in B's group according to its distance from B. |
| 1408 | // |
| 1409 | // Special care is taken to ensure group formation will not break any |
| 1410 | // dependences. |
| 1411 | for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend(); |
| 1412 | BI != E; ++BI) { |
| 1413 | Instruction *B = BI->first; |
| 1414 | StrideDescriptor DesB = BI->second; |
| 1415 | |
| 1416 | // Initialize a group for B if it has an allowable stride. Even if we don't |
| 1417 | // create a group for B, we continue with the bottom-up algorithm to ensure |
| 1418 | // we don't break any of B's dependences. |
Anna Thomas | 9675e3f | 2023-07-14 16:24:04 -0400 | [diff] [blame] | 1419 | InterleaveGroup<Instruction> *GroupB = nullptr; |
Jim Lin | 466f884 | 2020-02-18 10:48:38 +0800 | [diff] [blame] | 1420 | if (isStrided(DesB.Stride) && |
Dorit Nuzman | 38bbf81 | 2018-10-14 08:50:06 +0000 | [diff] [blame] | 1421 | (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) { |
Anna Thomas | 9675e3f | 2023-07-14 16:24:04 -0400 | [diff] [blame] | 1422 | GroupB = getInterleaveGroup(B); |
| 1423 | if (!GroupB) { |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1424 | LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B |
| 1425 | << '\n'); |
Anna Thomas | 9675e3f | 2023-07-14 16:24:04 -0400 | [diff] [blame] | 1426 | GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment); |
Anna Thomas | 3cf24db | 2023-07-26 15:08:06 -0400 | [diff] [blame] | 1427 | if (B->mayWriteToMemory()) |
| 1428 | StoreGroups.insert(GroupB); |
| 1429 | else |
| 1430 | LoadGroups.insert(GroupB); |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1431 | } |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1432 | } |
| 1433 | |
| 1434 | for (auto AI = std::next(BI); AI != E; ++AI) { |
| 1435 | Instruction *A = AI->first; |
| 1436 | StrideDescriptor DesA = AI->second; |
| 1437 | |
| 1438 | // Our code motion strategy implies that we can't have dependences |
| 1439 | // between accesses in an interleaved group and other accesses located |
| 1440 | // between the first and last member of the group. Note that this also |
| 1441 | // means that a group can't have more than one member at a given offset. |
| 1442 | // The accesses in a group can have dependences with other accesses, but |
| 1443 | // we must ensure we don't extend the boundaries of the group such that |
| 1444 | // we encompass those dependent accesses. |
| 1445 | // |
| 1446 | // For example, assume we have the sequence of accesses shown below in a |
| 1447 | // stride-2 loop: |
| 1448 | // |
| 1449 | // (1, 2) is a group | A[i] = a; // (1) |
| 1450 | // | A[i-1] = b; // (2) | |
| 1451 | // A[i-3] = c; // (3) |
| 1452 | // A[i] = d; // (4) | (2, 4) is not a group |
| 1453 | // |
| 1454 | // Because accesses (2) and (3) are dependent, we can group (2) with (1) |
| 1455 | // but not with (4). If we did, the dependent access (3) would be within |
| 1456 | // the boundaries of the (2, 4) group. |
Anna Thomas | 3cf24db | 2023-07-26 15:08:06 -0400 | [diff] [blame] | 1457 | auto DependentMember = [&](InterleaveGroup<Instruction> *Group, |
| 1458 | StrideEntry *A) -> Instruction * { |
| 1459 | for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) { |
| 1460 | Instruction *MemberOfGroupB = Group->getMember(Index); |
| 1461 | if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups( |
| 1462 | A, &*AccessStrideInfo.find(MemberOfGroupB))) |
| 1463 | return MemberOfGroupB; |
Florian Hahn | 4d847bf | 2023-07-07 11:06:30 +0100 | [diff] [blame] | 1464 | } |
Anna Thomas | 3cf24db | 2023-07-26 15:08:06 -0400 | [diff] [blame] | 1465 | return nullptr; |
| 1466 | }; |
Anna Thomas | e85fd3c | 2023-07-26 15:05:13 -0400 | [diff] [blame] | 1467 | |
Anna Thomas | 3cf24db | 2023-07-26 15:08:06 -0400 | [diff] [blame] | 1468 | auto GroupA = getInterleaveGroup(A); |
| 1469 | // If A is a load, dependencies are tolerable, there's nothing to do here. |
| 1470 | // If both A and B belong to the same (store) group, they are independent, |
| 1471 | // even if dependencies have not been recorded. |
| 1472 | // If both GroupA and GroupB are null, there's nothing to do here. |
| 1473 | if (A->mayWriteToMemory() && GroupA != GroupB) { |
| 1474 | Instruction *DependentInst = nullptr; |
| 1475 | // If GroupB is a load group, we have to compare AI against all |
| 1476 | // members of GroupB because if any load within GroupB has a dependency |
| 1477 | // on AI, we need to mark GroupB as complete and also release the |
| 1478 | // store GroupA (if A belongs to one). The former prevents incorrect |
| 1479 | // hoisting of load B above store A while the latter prevents incorrect |
| 1480 | // sinking of store A below load B. |
| 1481 | if (GroupB && LoadGroups.contains(GroupB)) |
| 1482 | DependentInst = DependentMember(GroupB, &*AI); |
| 1483 | else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) |
| 1484 | DependentInst = B; |
| 1485 | |
| 1486 | if (DependentInst) { |
| 1487 | // A has a store dependence on B (or on some load within GroupB) and |
| 1488 | // is part of a store group. Release A's group to prevent illegal |
| 1489 | // sinking of A below B. A will then be free to form another group |
| 1490 | // with instructions that precede it. |
| 1491 | if (GroupA && StoreGroups.contains(GroupA)) { |
| 1492 | LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to " |
| 1493 | "dependence between " |
| 1494 | << *A << " and " << *DependentInst << '\n'); |
| 1495 | StoreGroups.remove(GroupA); |
| 1496 | releaseGroup(GroupA); |
| 1497 | } |
| 1498 | // If B is a load and part of an interleave group, no earlier loads |
| 1499 | // can be added to B's interleave group, because this would mean the |
| 1500 | // DependentInst would move across store A. Mark the interleave group |
| 1501 | // as complete. |
| 1502 | if (GroupB && LoadGroups.contains(GroupB)) { |
| 1503 | LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B |
| 1504 | << " as complete.\n"); |
| 1505 | CompletedLoadGroups.insert(GroupB); |
| 1506 | } |
Anna Thomas | e85fd3c | 2023-07-26 15:05:13 -0400 | [diff] [blame] | 1507 | } |
Anna Thomas | 3cf24db | 2023-07-26 15:08:06 -0400 | [diff] [blame] | 1508 | } |
| 1509 | if (CompletedLoadGroups.contains(GroupB)) { |
| 1510 | // Skip trying to add A to B; continue to look for other conflicting A's 
| 1511 | // in groups to be released. |
| 1512 | continue; |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1513 | } |
| 1514 | |
| 1515 | // At this point, we've checked for illegal code motion. If either A or B |
| 1516 | // isn't strided, there's nothing left to do. |
| 1517 | if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride)) |
| 1518 | continue; |
| 1519 | |
| 1520 | // Ignore A if it's already in a group or isn't the same kind of memory |
| 1521 | // operation as B. |
| 1522 | // Note that mayReadFromMemory() isn't mutually exclusive to |
| 1523 | // mayWriteToMemory in the case of atomic loads. We shouldn't see those |
| 1524 | // here, canVectorizeMemory() should have returned false - except for the |
| 1525 | // case we asked for optimization remarks. |
| 1526 | if (isInterleaved(A) || |
| 1527 | (A->mayReadFromMemory() != B->mayReadFromMemory()) || |
| 1528 | (A->mayWriteToMemory() != B->mayWriteToMemory())) |
| 1529 | continue; |
| 1530 | |
| 1531 | // Check rules 1 and 2. Ignore A if its stride or size is different from |
| 1532 | // that of B. |
| 1533 | if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size) |
| 1534 | continue; |
| 1535 | |
| 1536 | // Ignore A if the memory object of A and B don't belong to the same |
| 1537 | // address space |
| 1538 | if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B)) |
| 1539 | continue; |
| 1540 | |
| 1541 | // Calculate the distance from A to B. |
| 1542 | const SCEVConstant *DistToB = dyn_cast<SCEVConstant>( |
| 1543 | PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev)); |
| 1544 | if (!DistToB) |
| 1545 | continue; |
| 1546 | int64_t DistanceToB = DistToB->getAPInt().getSExtValue(); |
| 1547 | |
| 1548 | // Check rule 3. Ignore A if its distance to B is not a multiple of the |
| 1549 | // size. |
| 1550 | if (DistanceToB % static_cast<int64_t>(DesB.Size)) |
| 1551 | continue; |
| 1552 | |
Dorit Nuzman | 38bbf81 | 2018-10-14 08:50:06 +0000 | [diff] [blame] | 1553 | // All members of a predicated interleave-group must have the same predicate, |
| 1554 | // and currently must reside in the same BB. |
Jim Lin | 466f884 | 2020-02-18 10:48:38 +0800 | [diff] [blame] | 1555 | BasicBlock *BlockA = A->getParent(); |
| 1556 | BasicBlock *BlockB = B->getParent(); |
Dorit Nuzman | 38bbf81 | 2018-10-14 08:50:06 +0000 | [diff] [blame] | 1557 | if ((isPredicated(BlockA) || isPredicated(BlockB)) && |
| 1558 | (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB)) |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1559 | continue; |
| 1560 | |
| 1561 | // The index of A is the index of B plus A's distance to B in multiples |
| 1562 | // of the size. |
| 1563 | int IndexA = |
Anna Thomas | 9675e3f | 2023-07-14 16:24:04 -0400 | [diff] [blame] | 1564 | GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size); |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1565 | |
| 1566 | // Try to insert A into B's group. |
Anna Thomas | 9675e3f | 2023-07-14 16:24:04 -0400 | [diff] [blame] | 1567 | if (GroupB->insertMember(A, IndexA, DesA.Alignment)) { |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1568 | LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n' |
| 1569 | << " into the interleave group with" << *B |
| 1570 | << '\n'); |
Anna Thomas | 9675e3f | 2023-07-14 16:24:04 -0400 | [diff] [blame] | 1571 | InterleaveGroupMap[A] = GroupB; |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1572 | |
| 1573 | // Set the first load in program order as the insert position. |
| 1574 | if (A->mayReadFromMemory()) |
Anna Thomas | 9675e3f | 2023-07-14 16:24:04 -0400 | [diff] [blame] | 1575 | GroupB->setInsertPos(A); |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1576 | } |
| 1577 | } // Iteration over A accesses. |
| 1578 | } // Iteration over B accesses. |
| 1579 | |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1580 | auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group, |
| 1581 | int Index, |
Amr Hesham | 4ba1800 | 2024-10-16 10:55:01 +0200 | [diff] [blame] | 1582 | const char *FirstOrLast) -> bool { |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1583 | Instruction *Member = Group->getMember(Index); |
| 1584 | assert(Member && "Group member does not exist"); |
| 1585 | Value *MemberPtr = getLoadStorePointerOperand(Member); |
Nikita Popov | 45c4673 | 2021-09-11 19:00:37 +0200 | [diff] [blame] | 1586 | Type *AccessTy = getLoadStoreType(Member); |
| 1587 | if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides, |
Philip Reames | f6d110e | 2022-09-27 15:55:44 -0700 | [diff] [blame] | 1588 | /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0)) |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1589 | return false; |
| 1590 | LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to " |
| 1591 | << FirstOrLast |
| 1592 | << " group member potentially pointer-wrapping.\n"); |
| 1593 | releaseGroup(Group); |
| 1594 | return true; |
| 1595 | }; |
| 1596 | |
| 1597 | // Remove interleaved groups with gaps whose memory |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1598 | // accesses may wrap around. We have to revisit the getPtrStride analysis, |
| 1599 | // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does |
| 1600 | // not check wrapping (see documentation there). |
| 1601 | // FORNOW we use Assume=false; |
| 1602 | // TODO: Change to Assume=true but making sure we don't exceed the threshold |
| 1603 | // of runtime SCEV assumptions checks (thereby potentially failing to |
| 1604 | // vectorize altogether). |
| 1605 | // Additional optional optimizations: |
| 1606 | // TODO: If we are peeling the loop and we know that the first pointer doesn't |
| 1607 | // wrap then we can deduce that all pointers in the group don't wrap. |
| 1608 | // This means that we can forcefully peel the loop in order to only have to |
| 1609 | // check the first pointer for no-wrap. When we'll change to use Assume=true |
| 1610 | // we'll only need at most one runtime check per interleaved group. |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1611 | for (auto *Group : LoadGroups) { |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1612 | // Case 1: A full group. We can skip the checks; for full groups, if the wide 
| 1613 | // load would wrap around the address space we would do a memory access at |
| 1614 | // nullptr even without the transformation. |
| 1615 | if (Group->getNumMembers() == Group->getFactor()) |
| 1616 | continue; |
| 1617 | |
| 1618 | // Case 2: If first and last members of the group don't wrap this implies |
| 1619 | // that all the pointers in the group don't wrap. |
| 1620 | // So we check only group member 0 (which is always guaranteed to exist), |
| 1621 | // and group member Factor - 1; If the latter doesn't exist we rely on |
Piotr Fusik | cc7b24a | 2024-09-24 11:19:56 +0200 | [diff] [blame] | 1622 | // peeling (if it is a non-reversed access -- see Case 3). |
Amr Hesham | 4ba1800 | 2024-10-16 10:55:01 +0200 | [diff] [blame] | 1623 | if (InvalidateGroupIfMemberMayWrap(Group, 0, "first")) |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1624 | continue; |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1625 | if (Group->getMember(Group->getFactor() - 1)) |
Amr Hesham | 4ba1800 | 2024-10-16 10:55:01 +0200 | [diff] [blame] | 1626 | InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1, "last"); |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1627 | else { |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1628 | // Case 3: A non-reversed interleaved load group with gaps: We need |
| 1629 | // to execute at least one scalar epilogue iteration. This will ensure |
| 1630 | // we don't speculatively access memory out-of-bounds. We only need |
| 1631 | // to look for a member at index factor - 1, since every group must have |
| 1632 | // a member at index zero. |
| 1633 | if (Group->isReverse()) { |
| 1634 | LLVM_DEBUG( |
| 1635 | dbgs() << "LV: Invalidate candidate interleaved group due to " |
| 1636 | "a reverse access with gaps.\n"); |
| 1637 | releaseGroup(Group); |
| 1638 | continue; |
| 1639 | } |
| 1640 | LLVM_DEBUG( |
| 1641 | dbgs() << "LV: Interleaved group requires epilogue iteration.\n"); |
| 1642 | RequiresScalarEpilogue = true; |
| 1643 | } |
| 1644 | } |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1645 | |
| 1646 | for (auto *Group : StoreGroups) { |
| 1647 | // Case 1: A full group. We can skip the checks; for full groups, if the wide 
| 1648 | // store would wrap around the address space we would do a memory access at |
| 1649 | // nullptr even without the transformation. |
| 1650 | if (Group->getNumMembers() == Group->getFactor()) |
| 1651 | continue; |
| 1652 | |
| 1653 | // Interleave-store-group with gaps is implemented using masked wide store. |
| 1654 | // Remove interleaved store groups with gaps if |
| 1655 | // masked-interleaved-accesses are not enabled by the target. |
| 1656 | if (!EnablePredicatedInterleavedMemAccesses) { |
| 1657 | LLVM_DEBUG( |
| 1658 | dbgs() << "LV: Invalidate candidate interleaved store group due " |
| 1659 | "to gaps.\n"); |
| 1660 | releaseGroup(Group); |
| 1661 | continue; |
| 1662 | } |
| 1663 | |
| 1664 | // Case 2: If first and last members of the group don't wrap this implies |
| 1665 | // that all the pointers in the group don't wrap. |
| 1666 | // So we check only group member 0 (which is always guaranteed to exist), |
| 1667 | // and the last group member. Case 3 (scalar epilog) is not relevant for |
| 1668 | // stores with gaps, which are implemented with masked-store (rather than |
| 1669 | // speculative access, as in loads). |
Amr Hesham | 4ba1800 | 2024-10-16 10:55:01 +0200 | [diff] [blame] | 1670 | if (InvalidateGroupIfMemberMayWrap(Group, 0, "first")) |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1671 | continue; |
| 1672 | for (int Index = Group->getFactor() - 1; Index > 0; Index--) |
| 1673 | if (Group->getMember(Index)) { |
Amr Hesham | 4ba1800 | 2024-10-16 10:55:01 +0200 | [diff] [blame] | 1674 | InvalidateGroupIfMemberMayWrap(Group, Index, "last"); |
Dorit Nuzman | 67278b8 | 2021-06-17 18:39:09 +0300 | [diff] [blame] | 1675 | break; |
| 1676 | } |
| 1677 | } |
Florian Hahn | 1086ce2 | 2018-09-12 08:01:57 +0000 | [diff] [blame] | 1678 | } |
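
// A minimal driver sketch, assuming IAI is an InterleavedAccessInfo set up for
// the loop being analyzed and SomeAccess is a load or store in that loop; the
// surrounding setup is omitted.
//
// \code
//   IAI.analyzeInterleaving(/*EnablePredicatedInterleavedMemAccesses=*/false);
//   if (auto *Group = IAI.getInterleaveGroup(SomeAccess))
//     LLVM_DEBUG(dbgs() << "Member of a group with factor "
//                       << Group->getFactor() << "\n");
// \endcode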
Dorit Nuzman | 3ec99fe | 2018-10-22 06:17:09 +0000 | [diff] [blame] | 1679 | |
| 1680 | void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() { |
| 1681 | // If no group had triggered the requirement to create an epilogue loop, |
| 1682 | // there is nothing to do. |
| 1683 | if (!requiresScalarEpilogue()) |
| 1684 | return; |
| 1685 | |
Florian Hahn | 2737362 | 2020-04-20 16:03:21 +0100 | [diff] [blame] | 1686 | // Release groups requiring scalar epilogues. Note that this also removes them |
| 1687 | // from InterleaveGroups. |
Nikita Popov | d42b392 | 2024-06-26 13:43:19 +0200 | [diff] [blame] | 1688 | bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) { |
Florian Hahn | 2737362 | 2020-04-20 16:03:21 +0100 | [diff] [blame] | 1689 | if (!Group->requiresScalarEpilogue()) |
Nikita Popov | d42b392 | 2024-06-26 13:43:19 +0200 | [diff] [blame] | 1690 | return false; |
Dorit Nuzman | 3ec99fe | 2018-10-22 06:17:09 +0000 | [diff] [blame] | 1691 | LLVM_DEBUG( |
Dorit Nuzman | 34da6dd | 2018-10-31 09:57:56 +0000 | [diff] [blame] | 1692 | dbgs() |
Dorit Nuzman | 3ec99fe | 2018-10-22 06:17:09 +0000 | [diff] [blame] | 1693 | << "LV: Invalidate candidate interleaved group due to gaps that " |
Dorit Nuzman | 34da6dd | 2018-10-31 09:57:56 +0000 | [diff] [blame] | 1694 | "require a scalar epilogue (not allowed under optsize) and cannot " |
| 1695 | "be masked (not enabled). \n"); |
Nikita Popov | d42b392 | 2024-06-26 13:43:19 +0200 | [diff] [blame] | 1696 | releaseGroupWithoutRemovingFromSet(Group); |
| 1697 | return true; |
| 1698 | }); |
Florian Hahn | 2737362 | 2020-04-20 16:03:21 +0100 | [diff] [blame] | 1699 | assert(ReleasedGroup && "At least one group must be invalidated, as a " |
| 1700 | "scalar epilogue was required"); |
| 1701 | (void)ReleasedGroup; |
Dorit Nuzman | 3ec99fe | 2018-10-22 06:17:09 +0000 | [diff] [blame] | 1702 | RequiresScalarEpilogue = false; |
| 1703 | } |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1704 | |
Florian Hahn | 86ed347 | 2018-11-13 16:26:34 +0000 | [diff] [blame] | 1705 | template <typename InstT> |
| 1706 | void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const { |
| 1707 | llvm_unreachable("addMetadata can only be used for Instruction"); |
| 1708 | } |
| 1709 | |
| 1710 | namespace llvm { |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1711 | template <> |
| 1712 | void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const { |
Kazu Hirata | 799916a | 2025-04-16 19:29:47 -0700 | [diff] [blame] | 1713 | SmallVector<Value *, 4> VL(make_second_range(Members)); |
Florian Hahn | a4dc7fe | 2018-11-13 15:58:18 +0000 | [diff] [blame] | 1714 | propagateMetadata(NewInst, VL); |
| 1715 | } |
Alexandros Lamprineas | 92289db | 2024-01-17 09:55:30 +0000 | [diff] [blame] | 1716 | } // namespace llvm |