| //===- X86ManualFoldTables.def ----------------------------*- C++ -*-==// |
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| // \file |
| // This file defines all the entries in X86 memory folding tables that need |
| // special handling. |
| //===----------------------------------------------------------------------===// |
| |
#ifndef NOFOLD
#define NOFOLD(INSN)
#endif
// NOFOLD(INSN): exclude instruction INSN from the auto-generated memory
// folding tables entirely.
//
// Bit-test-and-modify register forms. NOTE(review): the memory forms use
// bit-string addressing (per the Intel SDM), so they are not equivalent to
// the register forms -- confirm this is the reason for the exclusion.
NOFOLD(BTC16rr)
NOFOLD(BTC32rr)
NOFOLD(BTC64rr)
NOFOLD(BTR16rr)
NOFOLD(BTR32rr)
NOFOLD(BTR64rr)
NOFOLD(BTS16rr)
NOFOLD(BTS32rr)
NOFOLD(BTS64rr)
// Merge-masked (k-suffix) register forms of AVX-512 compress, CVTPS2PH and
// extract instructions.
NOFOLD(VCOMPRESSPDZ128rrk)
NOFOLD(VCOMPRESSPDZ256rrk)
NOFOLD(VCOMPRESSPDZrrk)
NOFOLD(VCOMPRESSPSZ128rrk)
NOFOLD(VCOMPRESSPSZ256rrk)
NOFOLD(VCOMPRESSPSZrrk)
NOFOLD(VCVTPS2PHZ128rrk)
NOFOLD(VCVTPS2PHZ256rrk)
NOFOLD(VCVTPS2PHZrrk)
NOFOLD(VEXTRACTF32X4Z256rrik)
NOFOLD(VEXTRACTF32X4Zrrik)
NOFOLD(VEXTRACTF32X8Zrrik)
NOFOLD(VEXTRACTF64X2Z256rrik)
NOFOLD(VEXTRACTF64X2Zrrik)
NOFOLD(VEXTRACTF64X4Zrrik)
NOFOLD(VEXTRACTI32X4Z256rrik)
NOFOLD(VEXTRACTI32X4Zrrik)
NOFOLD(VEXTRACTI32X8Zrrik)
NOFOLD(VEXTRACTI64X2Z256rrik)
NOFOLD(VEXTRACTI64X2Zrrik)
NOFOLD(VEXTRACTI64X4Zrrik)
// Masked register-to-memory store forms (mrk suffix).
NOFOLD(VMOVAPDZ128mrk)
NOFOLD(VMOVAPDZ256mrk)
NOFOLD(VMOVAPDZmrk)
NOFOLD(VMOVAPSZ128mrk)
NOFOLD(VMOVAPSZ256mrk)
NOFOLD(VMOVAPSZmrk)
NOFOLD(VMOVDQA32Z128mrk)
NOFOLD(VMOVDQA32Z256mrk)
NOFOLD(VMOVDQA32Zmrk)
NOFOLD(VMOVDQA64Z128mrk)
NOFOLD(VMOVDQA64Z256mrk)
NOFOLD(VMOVDQA64Zmrk)
NOFOLD(VMOVDQU16Z128mrk)
NOFOLD(VMOVDQU16Z256mrk)
NOFOLD(VMOVDQU16Zmrk)
NOFOLD(VMOVDQU32Z128mrk)
NOFOLD(VMOVDQU32Z256mrk)
NOFOLD(VMOVDQU32Zmrk)
NOFOLD(VMOVDQU64Z128mrk)
NOFOLD(VMOVDQU64Z256mrk)
NOFOLD(VMOVDQU64Zmrk)
NOFOLD(VMOVDQU8Z128mrk)
NOFOLD(VMOVDQU8Z256mrk)
NOFOLD(VMOVDQU8Zmrk)
NOFOLD(VMOVUPDZ128mrk)
NOFOLD(VMOVUPDZ256mrk)
NOFOLD(VMOVUPDZmrk)
NOFOLD(VMOVUPSZ128mrk)
NOFOLD(VMOVUPSZ256mrk)
NOFOLD(VMOVUPSZmrk)
// Merge-masked (k-suffix) register forms of AVX-512 PCOMPRESS and the
// truncating VPMOV* down-converts.
NOFOLD(VPCOMPRESSBZ128rrk)
NOFOLD(VPCOMPRESSBZ256rrk)
NOFOLD(VPCOMPRESSBZrrk)
NOFOLD(VPCOMPRESSDZ128rrk)
NOFOLD(VPCOMPRESSDZ256rrk)
NOFOLD(VPCOMPRESSDZrrk)
NOFOLD(VPCOMPRESSQZ128rrk)
NOFOLD(VPCOMPRESSQZ256rrk)
NOFOLD(VPCOMPRESSQZrrk)
NOFOLD(VPCOMPRESSWZ128rrk)
NOFOLD(VPCOMPRESSWZ256rrk)
NOFOLD(VPCOMPRESSWZrrk)
NOFOLD(VPMOVDBZ128rrk)
NOFOLD(VPMOVDBZ256rrk)
NOFOLD(VPMOVDBZrrk)
NOFOLD(VPMOVDWZ128rrk)
NOFOLD(VPMOVDWZ256rrk)
NOFOLD(VPMOVDWZrrk)
NOFOLD(VPMOVQBZ128rrk)
NOFOLD(VPMOVQBZ256rrk)
NOFOLD(VPMOVQBZrrk)
NOFOLD(VPMOVQDZ128rrk)
NOFOLD(VPMOVQDZ256rrk)
NOFOLD(VPMOVQDZrrk)
NOFOLD(VPMOVQWZ128rrk)
NOFOLD(VPMOVQWZ256rrk)
NOFOLD(VPMOVQWZrrk)
NOFOLD(VPMOVSDBZ128rrk)
NOFOLD(VPMOVSDBZ256rrk)
NOFOLD(VPMOVSDBZrrk)
NOFOLD(VPMOVSDWZ128rrk)
NOFOLD(VPMOVSDWZ256rrk)
NOFOLD(VPMOVSDWZrrk)
NOFOLD(VPMOVSQBZ128rrk)
NOFOLD(VPMOVSQBZ256rrk)
NOFOLD(VPMOVSQBZrrk)
NOFOLD(VPMOVSQDZ128rrk)
NOFOLD(VPMOVSQDZ256rrk)
NOFOLD(VPMOVSQDZrrk)
NOFOLD(VPMOVSQWZ128rrk)
NOFOLD(VPMOVSQWZ256rrk)
NOFOLD(VPMOVSQWZrrk)
NOFOLD(VPMOVSWBZ128rrk)
NOFOLD(VPMOVSWBZ256rrk)
NOFOLD(VPMOVSWBZrrk)
NOFOLD(VPMOVUSDBZ128rrk)
NOFOLD(VPMOVUSDBZ256rrk)
NOFOLD(VPMOVUSDBZrrk)
NOFOLD(VPMOVUSDWZ128rrk)
NOFOLD(VPMOVUSDWZ256rrk)
NOFOLD(VPMOVUSDWZrrk)
NOFOLD(VPMOVUSQBZ128rrk)
NOFOLD(VPMOVUSQBZ256rrk)
NOFOLD(VPMOVUSQBZrrk)
NOFOLD(VPMOVUSQDZ128rrk)
NOFOLD(VPMOVUSQDZ256rrk)
NOFOLD(VPMOVUSQDZrrk)
NOFOLD(VPMOVUSQWZ128rrk)
NOFOLD(VPMOVUSQWZ256rrk)
NOFOLD(VPMOVUSQWZrrk)
NOFOLD(VPMOVUSWBZ128rrk)
NOFOLD(VPMOVUSWBZ256rrk)
NOFOLD(VPMOVUSWBZrrk)
NOFOLD(VPMOVWBZ128rrk)
NOFOLD(VPMOVWBZ256rrk)
NOFOLD(VPMOVWBZrrk)
// Miscellaneous system and legacy instructions.
NOFOLD(ARPL16rr)
NOFOLD(BT16rr)
NOFOLD(BT32rr)
NOFOLD(BT64rr)
NOFOLD(CMPXCHG16rr)
NOFOLD(CMPXCHG32rr)
NOFOLD(CMPXCHG64rr)
NOFOLD(CMPXCHG8rr)
NOFOLD(LLDT16r)
NOFOLD(LMSW16r)
NOFOLD(LTRr)
NOFOLD(NOOPLr)
NOFOLD(NOOPQr)
NOFOLD(NOOPWr)
NOFOLD(POP16rmr)
NOFOLD(POP32rmr)
NOFOLD(POP64rmr)
NOFOLD(PUSH16rmr)
NOFOLD(PUSH32rmr)
NOFOLD(PUSH64rmr)
// Unmasked register forms of AVX-512 compress instructions.
NOFOLD(VCOMPRESSPDZ128rr)
NOFOLD(VCOMPRESSPDZ256rr)
NOFOLD(VCOMPRESSPDZrr)
NOFOLD(VCOMPRESSPSZ128rr)
NOFOLD(VCOMPRESSPSZ256rr)
NOFOLD(VCOMPRESSPSZrr)
NOFOLD(VERRr)
NOFOLD(VERWr)
NOFOLD(VMREAD32rr)
NOFOLD(VMREAD64rr)
NOFOLD(VPCOMPRESSBZ128rr)
NOFOLD(VPCOMPRESSBZ256rr)
NOFOLD(VPCOMPRESSBZrr)
NOFOLD(VPCOMPRESSDZ128rr)
NOFOLD(VPCOMPRESSDZ256rr)
NOFOLD(VPCOMPRESSDZrr)
NOFOLD(VPCOMPRESSQZ128rr)
NOFOLD(VPCOMPRESSQZ256rr)
NOFOLD(VPCOMPRESSQZrr)
NOFOLD(VPCOMPRESSWZ128rr)
NOFOLD(VPCOMPRESSWZ256rr)
NOFOLD(VPCOMPRESSWZrr)
NOFOLD(LAR16rr)
NOFOLD(LAR32rr)
NOFOLD(LAR64rr)
NOFOLD(LSL16rr)
NOFOLD(LSL32rr)
NOFOLD(LSL64rr)
NOFOLD(MOVSX16rr16)
NOFOLD(MOVZX16rr16)
NOFOLD(VMWRITE32rr)
NOFOLD(VMWRITE64rr)
// Zero-masked (kz-suffix) register forms of the blend-with-mask family.
NOFOLD(VBLENDMPDZ128rrkz)
NOFOLD(VBLENDMPDZ256rrkz)
NOFOLD(VBLENDMPDZrrkz)
NOFOLD(VBLENDMPSZ128rrkz)
NOFOLD(VBLENDMPSZ256rrkz)
NOFOLD(VBLENDMPSZrrkz)
NOFOLD(VPBLENDMBZ128rrkz)
NOFOLD(VPBLENDMBZ256rrkz)
NOFOLD(VPBLENDMBZrrkz)
NOFOLD(VPBLENDMDZ128rrkz)
NOFOLD(VPBLENDMDZ256rrkz)
NOFOLD(VPBLENDMDZrrkz)
NOFOLD(VPBLENDMQZ128rrkz)
NOFOLD(VPBLENDMQZ256rrkz)
NOFOLD(VPBLENDMQZrrkz)
NOFOLD(VPBLENDMWZ128rrkz)
NOFOLD(VPBLENDMWZ256rrkz)
NOFOLD(VPBLENDMWZrrkz)
NOFOLD(UD1Lr)
NOFOLD(UD1Qr)
NOFOLD(UD1Wr)
// Exclude these two because they would conflict with the
// {MMX_MOVD64from64rr, MMX_MOVQ64mr} entry in the unfolding table.
NOFOLD(MMX_MOVQ64rr)
NOFOLD(MMX_MOVQ64rr_REV)
// INSERTPSrmi has no count_s while INSERTPSrri has count_s.
// count_s indicates which element of the dst vector is inserted.
// If count_s != 0, we cannot fold INSERTPSrri into INSERTPSrmi.
//
// The following folding can happen only when count_s == 0:
//   load xmm0, m32
//   INSERTPSrri xmm1, xmm0, imm
//   =>
//   INSERTPSrmi xmm1, m32, imm
NOFOLD(INSERTPSrri)
NOFOLD(VINSERTPSZrri)
NOFOLD(VINSERTPSrri)
// Memory faults are suppressed for CFCMOV with a memory operand, so the
// register forms must not be folded.
NOFOLD(CFCMOV16rr_REV)
NOFOLD(CFCMOV32rr_REV)
NOFOLD(CFCMOV64rr_REV)
NOFOLD(CFCMOV16rr_ND)
NOFOLD(CFCMOV32rr_ND)
NOFOLD(CFCMOV64rr_ND)
#undef NOFOLD
| |
#ifndef ENTRY
#define ENTRY(REG, MEM, FLAGS)
#endif
// ENTRY(REG, MEM, FLAGS): a manually-specified folding-table entry mapping
// the register form REG to the memory form MEM, with FLAGS being a
// combination of TB_* folding flags (0 for none).
//
// The following entries are added manually because the encoding of the reg
// form does not match the encoding of the memory form.
ENTRY(ADD16ri_DB, ADD16mi, TB_NO_REVERSE)
ENTRY(ADD16rr_DB, ADD16mr, TB_NO_REVERSE)
ENTRY(ADD32ri_DB, ADD32mi, TB_NO_REVERSE)
ENTRY(ADD32rr_DB, ADD32mr, TB_NO_REVERSE)
ENTRY(ADD64ri32_DB, ADD64mi32, TB_NO_REVERSE)
ENTRY(ADD64rr_DB, ADD64mr, TB_NO_REVERSE)
ENTRY(ADD8ri_DB, ADD8mi, TB_NO_REVERSE)
ENTRY(ADD8rr_DB, ADD8mr, TB_NO_REVERSE)
ENTRY(ADD16rr_DB, ADD16rm, TB_NO_REVERSE)
ENTRY(ADD32rr_DB, ADD32rm, TB_NO_REVERSE)
ENTRY(ADD64rr_DB, ADD64rm, TB_NO_REVERSE)
ENTRY(ADD8rr_DB, ADD8rm, TB_NO_REVERSE)
// Store foldings (register source, memory destination).
ENTRY(MMX_MOVD64from64rr, MMX_MOVQ64mr, TB_FOLDED_STORE)
ENTRY(MMX_MOVD64grr, MMX_MOVD64mr, TB_FOLDED_STORE)
ENTRY(MOV64toSDrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(MOVDI2SSrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(MOVPQIto64rr, MOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(MOVSDto64rr, MOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(MOVSS2DIrr, MOVSSmr, TB_FOLDED_STORE)
ENTRY(MOVLHPSrr, MOVHPSrm, TB_NO_REVERSE)
// Load foldings (memory source).
ENTRY(PUSH16r, PUSH16rmm, TB_FOLDED_LOAD)
ENTRY(PUSH32r, PUSH32rmm, TB_FOLDED_LOAD)
ENTRY(PUSH64r, PUSH64rmm, TB_FOLDED_LOAD)
ENTRY(TAILJMPr, TAILJMPm, TB_FOLDED_LOAD)
ENTRY(TAILJMPr64, TAILJMPm64, TB_FOLDED_LOAD)
ENTRY(TAILJMPr64_REX, TAILJMPm64_REX, TB_FOLDED_LOAD)
ENTRY(TCRETURNri, TCRETURNmi, TB_FOLDED_LOAD | TB_NO_FORWARD)
ENTRY(TCRETURNri64, TCRETURNmi64, TB_FOLDED_LOAD | TB_NO_FORWARD)
ENTRY(VMOVLHPSZrr, VMOVHPSZ128rm, TB_NO_REVERSE)
ENTRY(VMOVLHPSrr, VMOVHPSrm, TB_NO_REVERSE)
ENTRY(VMOV64toSDZrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOV64toSDrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOVDI2SSZrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOVDI2SSrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOVPQIto64Zrr, VMOVPQI2QIZmr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOVPQIto64rr, VMOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOVSDto64Zrr, VMOVSDZmr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOVSDto64rr, VMOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE)
ENTRY(VMOVSS2DIZrr, VMOVSSZmr, TB_FOLDED_STORE)
ENTRY(VMOVSS2DIrr, VMOVSSmr, TB_FOLDED_STORE)
ENTRY(MMX_MOVD64to64rr, MMX_MOVQ64rm, 0)
ENTRY(MOV64toPQIrr, MOVQI2PQIrm, TB_NO_REVERSE)
ENTRY(MOV64toSDrr, MOVSDrm_alt, TB_NO_REVERSE)
ENTRY(MOVDI2SSrr, MOVSSrm_alt, 0)
ENTRY(VMOV64toPQIZrr, VMOVQI2PQIZrm, TB_NO_REVERSE)
ENTRY(VMOV64toPQIrr, VMOVQI2PQIrm, TB_NO_REVERSE)
ENTRY(VMOV64toSDZrr, VMOVSDZrm_alt, TB_NO_REVERSE)
ENTRY(VMOV64toSDrr, VMOVSDrm_alt, TB_NO_REVERSE)
ENTRY(VMOVDI2SSZrr, VMOVSSZrm_alt, 0)
ENTRY(VMOVDI2SSrr, VMOVSSrm_alt, 0)
ENTRY(MOVSDrr, MOVLPDrm, TB_NO_REVERSE)
ENTRY(VMOVSDZrr, VMOVLPDZ128rm, TB_NO_REVERSE)
ENTRY(VMOVSDrr, VMOVLPDrm, TB_NO_REVERSE)
#undef ENTRY
// Prefixes for instructions that are unsafe for masked-load folding.
// Folding with the same mask is only safe if every active destination
// element reads only from source elements that are also active under the
// same mask. These instructions perform element rearrangement/broadcasting
// that may cause active destination elements to read from masked-off source
// elements.
// Each prefix matches every opcode of the form:
//   PREFIX [Z|Z128|Z256] [rr|ri|rri] [k|kz]
#ifndef NOFOLD_SAME_MASK_PREFIX
#define NOFOLD_SAME_MASK_PREFIX(PREFIX)
#endif
// NOFOLD_SAME_MASK_PREFIX(PREFIX): block masked-load folding for all masked
// register forms whose opcode starts with PREFIX (pattern above).
NOFOLD_SAME_MASK_PREFIX(VALIGND)
NOFOLD_SAME_MASK_PREFIX(VALIGNQ)
NOFOLD_SAME_MASK_PREFIX(VBROADCASTF32X2)
NOFOLD_SAME_MASK_PREFIX(VBROADCASTI32X2)
NOFOLD_SAME_MASK_PREFIX(VBROADCASTSD)
NOFOLD_SAME_MASK_PREFIX(VBROADCASTSS)
NOFOLD_SAME_MASK_PREFIX(VDBPSADBW)
NOFOLD_SAME_MASK_PREFIX(VEXPANDPD)
NOFOLD_SAME_MASK_PREFIX(VEXPANDPS)
NOFOLD_SAME_MASK_PREFIX(VGF2P8AFFINEINVQB)
NOFOLD_SAME_MASK_PREFIX(VGF2P8AFFINEQB)
NOFOLD_SAME_MASK_PREFIX(VINSERTF32X4)
NOFOLD_SAME_MASK_PREFIX(VINSERTF32X8)
NOFOLD_SAME_MASK_PREFIX(VINSERTF64X2)
NOFOLD_SAME_MASK_PREFIX(VINSERTF64X4)
NOFOLD_SAME_MASK_PREFIX(VINSERTI32X4)
NOFOLD_SAME_MASK_PREFIX(VINSERTI32X8)
NOFOLD_SAME_MASK_PREFIX(VINSERTI64X2)
NOFOLD_SAME_MASK_PREFIX(VINSERTI64X4)
NOFOLD_SAME_MASK_PREFIX(VMOVDDUP)
NOFOLD_SAME_MASK_PREFIX(VMOVSHDUP)
NOFOLD_SAME_MASK_PREFIX(VMOVSLDUP)
NOFOLD_SAME_MASK_PREFIX(VMPSADBW)
NOFOLD_SAME_MASK_PREFIX(VPACKSSDW)
NOFOLD_SAME_MASK_PREFIX(VPACKSSWB)
NOFOLD_SAME_MASK_PREFIX(VPACKUSDW)
NOFOLD_SAME_MASK_PREFIX(VPACKUSWB)
NOFOLD_SAME_MASK_PREFIX(VPALIGNR)
NOFOLD_SAME_MASK_PREFIX(VPBROADCASTB)
NOFOLD_SAME_MASK_PREFIX(VPBROADCASTD)
NOFOLD_SAME_MASK_PREFIX(VPBROADCASTQ)
NOFOLD_SAME_MASK_PREFIX(VPBROADCASTW)
NOFOLD_SAME_MASK_PREFIX(VPCONFLICTD)
NOFOLD_SAME_MASK_PREFIX(VPCONFLICTQ)
NOFOLD_SAME_MASK_PREFIX(VPERMB)
NOFOLD_SAME_MASK_PREFIX(VPERMD)
NOFOLD_SAME_MASK_PREFIX(VPERMI2B)
NOFOLD_SAME_MASK_PREFIX(VPERMI2D)
NOFOLD_SAME_MASK_PREFIX(VPERMI2PD)
NOFOLD_SAME_MASK_PREFIX(VPERMI2PS)
NOFOLD_SAME_MASK_PREFIX(VPERMI2Q)
NOFOLD_SAME_MASK_PREFIX(VPERMI2W)
NOFOLD_SAME_MASK_PREFIX(VPERMPD)
NOFOLD_SAME_MASK_PREFIX(VPERMPS)
NOFOLD_SAME_MASK_PREFIX(VPERMQ)
NOFOLD_SAME_MASK_PREFIX(VPERMT2B)
NOFOLD_SAME_MASK_PREFIX(VPERMT2D)
NOFOLD_SAME_MASK_PREFIX(VPERMT2PD)
NOFOLD_SAME_MASK_PREFIX(VPERMT2PS)
NOFOLD_SAME_MASK_PREFIX(VPERMT2Q)
NOFOLD_SAME_MASK_PREFIX(VPERMT2W)
NOFOLD_SAME_MASK_PREFIX(VPERMW)
NOFOLD_SAME_MASK_PREFIX(VPEXPANDB)
NOFOLD_SAME_MASK_PREFIX(VPEXPANDD)
NOFOLD_SAME_MASK_PREFIX(VPEXPANDQ)
NOFOLD_SAME_MASK_PREFIX(VPEXPANDW)
NOFOLD_SAME_MASK_PREFIX(VPMULTISHIFTQB)
NOFOLD_SAME_MASK_PREFIX(VPSHUFD)
NOFOLD_SAME_MASK_PREFIX(VPSHUFHW)
NOFOLD_SAME_MASK_PREFIX(VPSHUFLW)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKHBW)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKHDQ)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKHQDQ)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKHWD)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKLBW)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKLDQ)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKLQDQ)
NOFOLD_SAME_MASK_PREFIX(VPUNPCKLWD)
NOFOLD_SAME_MASK_PREFIX(VSHUFF32X4)
NOFOLD_SAME_MASK_PREFIX(VSHUFF64X2)
NOFOLD_SAME_MASK_PREFIX(VSHUFI32X4)
NOFOLD_SAME_MASK_PREFIX(VSHUFI64X2)
NOFOLD_SAME_MASK_PREFIX(VSHUFPD)
NOFOLD_SAME_MASK_PREFIX(VSHUFPS)
NOFOLD_SAME_MASK_PREFIX(VUNPCKHPD)
NOFOLD_SAME_MASK_PREFIX(VUNPCKHPS)
NOFOLD_SAME_MASK_PREFIX(VUNPCKLPD)
NOFOLD_SAME_MASK_PREFIX(VUNPCKLPS)
#undef NOFOLD_SAME_MASK_PREFIX
#ifndef NOFOLD_SAME_MASK
#define NOFOLD_SAME_MASK(INSN)
#endif
// NOFOLD_SAME_MASK(INSN): block masked-load folding for the single exact
// opcode INSN (complements the prefix-based NOFOLD_SAME_MASK_PREFIX list
// above, which matches families of opcodes).
//
// VPERMILPD/VPERMILPS immediate (rik) forms: only rik forms are listed
// here; the variable (rrk) forms are NOT blocked.
NOFOLD_SAME_MASK(VPERMILPDZ128rik)
NOFOLD_SAME_MASK(VPERMILPDZ128rikz)
NOFOLD_SAME_MASK(VPERMILPDZ256rik)
NOFOLD_SAME_MASK(VPERMILPDZ256rikz)
NOFOLD_SAME_MASK(VPERMILPDZrik)
NOFOLD_SAME_MASK(VPERMILPDZrikz)
NOFOLD_SAME_MASK(VPERMILPSZ128rik)
NOFOLD_SAME_MASK(VPERMILPSZ128rikz)
NOFOLD_SAME_MASK(VPERMILPSZ256rik)
NOFOLD_SAME_MASK(VPERMILPSZ256rikz)
NOFOLD_SAME_MASK(VPERMILPSZrik)
NOFOLD_SAME_MASK(VPERMILPSZrikz)
#undef NOFOLD_SAME_MASK