//===- X86ManualFoldTables.def ---------------------------*- C++ -*-===//
| // |
| // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| // See https://llvm.org/LICENSE.txt for license information. |
| // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| // |
| //===----------------------------------------------------------------------===// |
| // \file |
// This file defines all the entries in the X86 memory folding tables that
// need special handling.
| //===----------------------------------------------------------------------===// |
| |
| #ifndef NOFOLD |
| #define NOFOLD(INSN) |
| #endif |
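//
// The empty fallback above lets an including file define only the macro it
// needs. As a rough, illustrative sketch (the array name and include site
// below are hypothetical, not the actual consumer), NOFOLD could be expanded
// to collect the names of the opcodes excluded from folding:
//
//   #define NOFOLD(INSN) #INSN,
//   static const char *NoFoldInsns[] = {
//   #include "X86ManualFoldTables.def"
//   };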
| NOFOLD(BTC16rr) |
| NOFOLD(BTC32rr) |
| NOFOLD(BTC64rr) |
| NOFOLD(BTR16rr) |
| NOFOLD(BTR32rr) |
| NOFOLD(BTR64rr) |
| NOFOLD(BTS16rr) |
| NOFOLD(BTS32rr) |
| NOFOLD(BTS64rr) |
| NOFOLD(VCOMPRESSPDZ128rrk) |
| NOFOLD(VCOMPRESSPDZ256rrk) |
| NOFOLD(VCOMPRESSPDZrrk) |
| NOFOLD(VCOMPRESSPSZ128rrk) |
| NOFOLD(VCOMPRESSPSZ256rrk) |
| NOFOLD(VCOMPRESSPSZrrk) |
| NOFOLD(VCVTPS2PHZ128rrk) |
| NOFOLD(VCVTPS2PHZ256rrk) |
| NOFOLD(VCVTPS2PHZrrk) |
| NOFOLD(VEXTRACTF32x4Z256rrik) |
| NOFOLD(VEXTRACTF32x4Zrrik) |
| NOFOLD(VEXTRACTF32x8Zrrik) |
| NOFOLD(VEXTRACTF64x2Z256rrik) |
| NOFOLD(VEXTRACTF64x2Zrrik) |
| NOFOLD(VEXTRACTF64x4Zrrik) |
| NOFOLD(VEXTRACTI32x4Z256rrik) |
| NOFOLD(VEXTRACTI32x4Zrrik) |
| NOFOLD(VEXTRACTI32x8Zrrik) |
| NOFOLD(VEXTRACTI64x2Z256rrik) |
| NOFOLD(VEXTRACTI64x2Zrrik) |
| NOFOLD(VEXTRACTI64x4Zrrik) |
| NOFOLD(VMOVAPDZ128mrk) |
| NOFOLD(VMOVAPDZ256mrk) |
| NOFOLD(VMOVAPDZmrk) |
| NOFOLD(VMOVAPSZ128mrk) |
| NOFOLD(VMOVAPSZ256mrk) |
| NOFOLD(VMOVAPSZmrk) |
| NOFOLD(VMOVDQA32Z128mrk) |
| NOFOLD(VMOVDQA32Z256mrk) |
| NOFOLD(VMOVDQA32Zmrk) |
| NOFOLD(VMOVDQA64Z128mrk) |
| NOFOLD(VMOVDQA64Z256mrk) |
| NOFOLD(VMOVDQA64Zmrk) |
| NOFOLD(VMOVDQU16Z128mrk) |
| NOFOLD(VMOVDQU16Z256mrk) |
| NOFOLD(VMOVDQU16Zmrk) |
| NOFOLD(VMOVDQU32Z128mrk) |
| NOFOLD(VMOVDQU32Z256mrk) |
| NOFOLD(VMOVDQU32Zmrk) |
| NOFOLD(VMOVDQU64Z128mrk) |
| NOFOLD(VMOVDQU64Z256mrk) |
| NOFOLD(VMOVDQU64Zmrk) |
| NOFOLD(VMOVDQU8Z128mrk) |
| NOFOLD(VMOVDQU8Z256mrk) |
| NOFOLD(VMOVDQU8Zmrk) |
| NOFOLD(VMOVUPDZ128mrk) |
| NOFOLD(VMOVUPDZ256mrk) |
| NOFOLD(VMOVUPDZmrk) |
| NOFOLD(VMOVUPSZ128mrk) |
| NOFOLD(VMOVUPSZ256mrk) |
| NOFOLD(VMOVUPSZmrk) |
| NOFOLD(VPCOMPRESSBZ128rrk) |
| NOFOLD(VPCOMPRESSBZ256rrk) |
| NOFOLD(VPCOMPRESSBZrrk) |
| NOFOLD(VPCOMPRESSDZ128rrk) |
| NOFOLD(VPCOMPRESSDZ256rrk) |
| NOFOLD(VPCOMPRESSDZrrk) |
| NOFOLD(VPCOMPRESSQZ128rrk) |
| NOFOLD(VPCOMPRESSQZ256rrk) |
| NOFOLD(VPCOMPRESSQZrrk) |
| NOFOLD(VPCOMPRESSWZ128rrk) |
| NOFOLD(VPCOMPRESSWZ256rrk) |
| NOFOLD(VPCOMPRESSWZrrk) |
| NOFOLD(VPMOVDBZ128rrk) |
| NOFOLD(VPMOVDBZ256rrk) |
| NOFOLD(VPMOVDBZrrk) |
| NOFOLD(VPMOVDWZ128rrk) |
| NOFOLD(VPMOVDWZ256rrk) |
| NOFOLD(VPMOVDWZrrk) |
| NOFOLD(VPMOVQBZ128rrk) |
| NOFOLD(VPMOVQBZ256rrk) |
| NOFOLD(VPMOVQBZrrk) |
| NOFOLD(VPMOVQDZ128rrk) |
| NOFOLD(VPMOVQDZ256rrk) |
| NOFOLD(VPMOVQDZrrk) |
| NOFOLD(VPMOVQWZ128rrk) |
| NOFOLD(VPMOVQWZ256rrk) |
| NOFOLD(VPMOVQWZrrk) |
| NOFOLD(VPMOVSDBZ128rrk) |
| NOFOLD(VPMOVSDBZ256rrk) |
| NOFOLD(VPMOVSDBZrrk) |
| NOFOLD(VPMOVSDWZ128rrk) |
| NOFOLD(VPMOVSDWZ256rrk) |
| NOFOLD(VPMOVSDWZrrk) |
| NOFOLD(VPMOVSQBZ128rrk) |
| NOFOLD(VPMOVSQBZ256rrk) |
| NOFOLD(VPMOVSQBZrrk) |
| NOFOLD(VPMOVSQDZ128rrk) |
| NOFOLD(VPMOVSQDZ256rrk) |
| NOFOLD(VPMOVSQDZrrk) |
| NOFOLD(VPMOVSQWZ128rrk) |
| NOFOLD(VPMOVSQWZ256rrk) |
| NOFOLD(VPMOVSQWZrrk) |
| NOFOLD(VPMOVSWBZ128rrk) |
| NOFOLD(VPMOVSWBZ256rrk) |
| NOFOLD(VPMOVSWBZrrk) |
| NOFOLD(VPMOVUSDBZ128rrk) |
| NOFOLD(VPMOVUSDBZ256rrk) |
| NOFOLD(VPMOVUSDBZrrk) |
| NOFOLD(VPMOVUSDWZ128rrk) |
| NOFOLD(VPMOVUSDWZ256rrk) |
| NOFOLD(VPMOVUSDWZrrk) |
| NOFOLD(VPMOVUSQBZ128rrk) |
| NOFOLD(VPMOVUSQBZ256rrk) |
| NOFOLD(VPMOVUSQBZrrk) |
| NOFOLD(VPMOVUSQDZ128rrk) |
| NOFOLD(VPMOVUSQDZ256rrk) |
| NOFOLD(VPMOVUSQDZrrk) |
| NOFOLD(VPMOVUSQWZ128rrk) |
| NOFOLD(VPMOVUSQWZ256rrk) |
| NOFOLD(VPMOVUSQWZrrk) |
| NOFOLD(VPMOVUSWBZ128rrk) |
| NOFOLD(VPMOVUSWBZ256rrk) |
| NOFOLD(VPMOVUSWBZrrk) |
| NOFOLD(VPMOVWBZ128rrk) |
| NOFOLD(VPMOVWBZ256rrk) |
| NOFOLD(VPMOVWBZrrk) |
| NOFOLD(ARPL16rr) |
| NOFOLD(BT16rr) |
| NOFOLD(BT32rr) |
| NOFOLD(BT64rr) |
| NOFOLD(CMPXCHG16rr) |
| NOFOLD(CMPXCHG32rr) |
| NOFOLD(CMPXCHG64rr) |
| NOFOLD(CMPXCHG8rr) |
| NOFOLD(LLDT16r) |
| NOFOLD(LMSW16r) |
| NOFOLD(LTRr) |
| NOFOLD(NOOPLr) |
| NOFOLD(NOOPQr) |
| NOFOLD(NOOPWr) |
| NOFOLD(POP16rmr) |
| NOFOLD(POP32rmr) |
| NOFOLD(POP64rmr) |
| NOFOLD(PUSH16rmr) |
| NOFOLD(PUSH32rmr) |
| NOFOLD(PUSH64rmr) |
| NOFOLD(VCOMPRESSPDZ128rr) |
| NOFOLD(VCOMPRESSPDZ256rr) |
| NOFOLD(VCOMPRESSPDZrr) |
| NOFOLD(VCOMPRESSPSZ128rr) |
| NOFOLD(VCOMPRESSPSZ256rr) |
| NOFOLD(VCOMPRESSPSZrr) |
| NOFOLD(VERRr) |
| NOFOLD(VERWr) |
| NOFOLD(VMREAD32rr) |
| NOFOLD(VMREAD64rr) |
| NOFOLD(VPCOMPRESSBZ128rr) |
| NOFOLD(VPCOMPRESSBZ256rr) |
| NOFOLD(VPCOMPRESSBZrr) |
| NOFOLD(VPCOMPRESSDZ128rr) |
| NOFOLD(VPCOMPRESSDZ256rr) |
| NOFOLD(VPCOMPRESSDZrr) |
| NOFOLD(VPCOMPRESSQZ128rr) |
| NOFOLD(VPCOMPRESSQZ256rr) |
| NOFOLD(VPCOMPRESSQZrr) |
| NOFOLD(VPCOMPRESSWZ128rr) |
| NOFOLD(VPCOMPRESSWZ256rr) |
| NOFOLD(VPCOMPRESSWZrr) |
| NOFOLD(LAR16rr) |
| NOFOLD(LAR32rr) |
| NOFOLD(LAR64rr) |
| NOFOLD(LSL16rr) |
| NOFOLD(LSL32rr) |
| NOFOLD(LSL64rr) |
| NOFOLD(MOVSX16rr16) |
| NOFOLD(MOVZX16rr16) |
| NOFOLD(VMWRITE32rr) |
| NOFOLD(VMWRITE64rr) |
| NOFOLD(VBLENDMPDZ128rrkz) |
| NOFOLD(VBLENDMPDZ256rrkz) |
| NOFOLD(VBLENDMPDZrrkz) |
| NOFOLD(VBLENDMPSZ128rrkz) |
| NOFOLD(VBLENDMPSZ256rrkz) |
| NOFOLD(VBLENDMPSZrrkz) |
| NOFOLD(VPBLENDMBZ128rrkz) |
| NOFOLD(VPBLENDMBZ256rrkz) |
| NOFOLD(VPBLENDMBZrrkz) |
| NOFOLD(VPBLENDMDZ128rrkz) |
| NOFOLD(VPBLENDMDZ256rrkz) |
| NOFOLD(VPBLENDMDZrrkz) |
| NOFOLD(VPBLENDMQZ128rrkz) |
| NOFOLD(VPBLENDMQZ256rrkz) |
| NOFOLD(VPBLENDMQZrrkz) |
| NOFOLD(VPBLENDMWZ128rrkz) |
| NOFOLD(VPBLENDMWZ256rrkz) |
| NOFOLD(VPBLENDMWZrrkz) |
| NOFOLD(UD1Lr) |
| NOFOLD(UD1Qr) |
| NOFOLD(UD1Wr) |
// Exclude these two because they would conflict with the
// {MMX_MOVD64from64rr, MMX_MOVQ64mr} entry in the unfolding table.
| NOFOLD(MMX_MOVQ64rr) |
| NOFOLD(MMX_MOVQ64rr_REV) |
// INSERTPSrmi has no count_s field, while INSERTPSrri does. count_s selects
// which element of the source vector is inserted into the destination.
// If count_s != 0, INSERTPSrri cannot be folded into INSERTPSrmi.
//
// The following folding is only possible when count_s == 0:
//   load xmm0, m32
//   INSERTPSrri xmm1, xmm0, imm
// =>
//   INSERTPSrmi xmm1, m32, imm
| NOFOLD(INSERTPSrri) |
| NOFOLD(VINSERTPSZrri) |
| NOFOLD(VINSERTPSrri) |
// Memory faults are suppressed for CFCMOV with a memory operand, so folding
// would change the fault semantics.
| NOFOLD(CFCMOV16rr_REV) |
| NOFOLD(CFCMOV32rr_REV) |
| NOFOLD(CFCMOV64rr_REV) |
| NOFOLD(CFCMOV16rr_ND) |
| NOFOLD(CFCMOV32rr_ND) |
| NOFOLD(CFCMOV64rr_ND) |
| #undef NOFOLD |
| |
| #ifndef ENTRY |
| #define ENTRY(REG, MEM, FLAGS) |
| #endif |
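//
// As a rough, illustrative sketch (the struct, array name, and include site
// below are hypothetical, not the actual consumer), ENTRY could be expanded
// inside the X86 backend to build (register opcode, memory opcode, flags)
// records:
//
//   struct ManualFoldEntry { unsigned RegOp, MemOp, Flags; };
//   #define ENTRY(REG, MEM, FLAGS) {X86::REG, X86::MEM, FLAGS},
//   static const ManualFoldEntry ManualFoldTable[] = {
//   #include "X86ManualFoldTables.def"
//   };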
// The following entries are added manually because the encoding of the
// register form does not match the encoding of the memory form.
| ENTRY(ADD16ri_DB, ADD16mi, TB_NO_REVERSE) |
| ENTRY(ADD16rr_DB, ADD16mr, TB_NO_REVERSE) |
| ENTRY(ADD32ri_DB, ADD32mi, TB_NO_REVERSE) |
| ENTRY(ADD32rr_DB, ADD32mr, TB_NO_REVERSE) |
| ENTRY(ADD64ri32_DB, ADD64mi32, TB_NO_REVERSE) |
| ENTRY(ADD64rr_DB, ADD64mr, TB_NO_REVERSE) |
| ENTRY(ADD8ri_DB, ADD8mi, TB_NO_REVERSE) |
| ENTRY(ADD8rr_DB, ADD8mr, TB_NO_REVERSE) |
| ENTRY(ADD16rr_DB, ADD16rm, TB_NO_REVERSE) |
| ENTRY(ADD32rr_DB, ADD32rm, TB_NO_REVERSE) |
| ENTRY(ADD64rr_DB, ADD64rm, TB_NO_REVERSE) |
| ENTRY(ADD8rr_DB, ADD8rm, TB_NO_REVERSE) |
| ENTRY(MMX_MOVD64from64rr, MMX_MOVQ64mr, TB_FOLDED_STORE) |
| ENTRY(MMX_MOVD64grr, MMX_MOVD64mr, TB_FOLDED_STORE) |
| ENTRY(MOV64toSDrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(MOVDI2SSrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(MOVPQIto64rr, MOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(MOVSDto64rr, MOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(MOVSS2DIrr, MOVSSmr, TB_FOLDED_STORE) |
| ENTRY(MOVLHPSrr, MOVHPSrm, TB_NO_REVERSE) |
| ENTRY(PUSH16r, PUSH16rmm, TB_FOLDED_LOAD) |
| ENTRY(PUSH32r, PUSH32rmm, TB_FOLDED_LOAD) |
| ENTRY(PUSH64r, PUSH64rmm, TB_FOLDED_LOAD) |
| ENTRY(TAILJMPr, TAILJMPm, TB_FOLDED_LOAD) |
| ENTRY(TAILJMPr64, TAILJMPm64, TB_FOLDED_LOAD) |
| ENTRY(TAILJMPr64_REX, TAILJMPm64_REX, TB_FOLDED_LOAD) |
| ENTRY(TCRETURNri, TCRETURNmi, TB_FOLDED_LOAD | TB_NO_FORWARD) |
| ENTRY(TCRETURNri64, TCRETURNmi64, TB_FOLDED_LOAD | TB_NO_FORWARD) |
| ENTRY(VMOVLHPSZrr, VMOVHPSZ128rm, TB_NO_REVERSE) |
| ENTRY(VMOVLHPSrr, VMOVHPSrm, TB_NO_REVERSE) |
| ENTRY(VMOV64toSDZrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOV64toSDrr, MOV64mr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOVDI2SSZrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOVDI2SSrr, MOV32mr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOVPQIto64Zrr, VMOVPQI2QIZmr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOVPQIto64rr, VMOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOVSDto64Zrr, VMOVSDZmr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOVSDto64rr, VMOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE) |
| ENTRY(VMOVSS2DIZrr, VMOVSSZmr, TB_FOLDED_STORE) |
| ENTRY(VMOVSS2DIrr, VMOVSSmr, TB_FOLDED_STORE) |
| ENTRY(MMX_MOVD64to64rr, MMX_MOVQ64rm, 0) |
| ENTRY(MOV64toPQIrr, MOVQI2PQIrm, TB_NO_REVERSE) |
| ENTRY(MOV64toSDrr, MOVSDrm_alt, TB_NO_REVERSE) |
| ENTRY(MOVDI2SSrr, MOVSSrm_alt, 0) |
| ENTRY(VMOV64toPQIZrr, VMOVQI2PQIZrm, TB_NO_REVERSE) |
| ENTRY(VMOV64toPQIrr, VMOVQI2PQIrm, TB_NO_REVERSE) |
| ENTRY(VMOV64toSDZrr, VMOVSDZrm_alt, TB_NO_REVERSE) |
| ENTRY(VMOV64toSDrr, VMOVSDrm_alt, TB_NO_REVERSE) |
| ENTRY(VMOVDI2SSZrr, VMOVSSZrm_alt, 0) |
| ENTRY(VMOVDI2SSrr, VMOVSSrm_alt, 0) |
| ENTRY(MOVSDrr, MOVLPDrm, TB_NO_REVERSE) |
| ENTRY(VMOVSDZrr, VMOVLPDZ128rm, TB_NO_REVERSE) |
| ENTRY(VMOVSDrr, VMOVLPDrm, TB_NO_REVERSE) |
| #undef ENTRY |