//===-- RISCVInstrInfoVPseudos.td - RISC-V 'V' Pseudos -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure to support code generation
/// for the standard 'V' (Vector) extension, version 0.10. This version is still
/// experimental as the 'V' extension hasn't been ratified yet.
///
/// This file is included from RISCVInstrInfoV.td
///
//===----------------------------------------------------------------------===//
def riscv_vmv_x_s : SDNode<"RISCVISD::VMV_X_S",
SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>,
SDTCisInt<1>]>>;
def riscv_read_vlenb : SDNode<"RISCVISD::READ_VLENB",
SDTypeProfile<1, 0, [SDTCisVT<0, XLenVT>]>>;
// Operand that is allowed to be a register or a 5-bit immediate.
// This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
// pseudo instructions.
def AVL : RegisterOperand<GPRNoX0> {
let OperandNamespace = "RISCVOp";
let OperandType = "OPERAND_AVL";
}
// X0 has special meaning for vsetvl/vsetvli.
//  rd | rs1 |   AVL value | Effect on vl
//--------------------------------------------------------------
// !X0 |  X0 |       VLMAX | Set vl to VLMAX
//  X0 |  X0 | Value in vl | Keep current vl, just change vtype.
def VLOp : ComplexPattern<XLenVT, 1, "selectVLOp">;
def DecImm : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
N->getValueType(0));
}]>;
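// E.g. an input immediate of 5 is rewritten as the target constant 4, keeping
// the original value type.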
defvar TAIL_UNDISTURBED = 0;
defvar TAIL_AGNOSTIC = 1;
//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//
// This class describes information associated with an LMUL.
class LMULInfo<int lmul, int oct, VReg regclass, VReg wregclass,
VReg f2regclass, VReg f4regclass, VReg f8regclass, string mx> {
bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
VReg vrclass = regclass;
VReg wvrclass = wregclass;
VReg f8vrclass = f8regclass;
VReg f4vrclass = f4regclass;
VReg f2vrclass = f2regclass;
string MX = mx;
int octuple = oct;
}
// Associate LMUL with tablegen records of register classes.
def V_M1 : LMULInfo<0b000, 8, VR, VRM2, VR, VR, VR, "M1">;
def V_M2 : LMULInfo<0b001, 16, VRM2, VRM4, VR, VR, VR, "M2">;
def V_M4 : LMULInfo<0b010, 32, VRM4, VRM8, VRM2, VR, VR, "M4">;
def V_M8 : LMULInfo<0b011, 64, VRM8,/*NoVReg*/VR, VRM4, VRM2, VR, "M8">;
def V_MF8 : LMULInfo<0b101, 1, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF8">;
def V_MF4 : LMULInfo<0b110, 2, VR, VR, VR,/*NoVReg*/VR,/*NoVReg*/VR, "MF4">;
def V_MF2 : LMULInfo<0b111, 4, VR, VR, VR, VR,/*NoVReg*/VR, "MF2">;
// Used to iterate over all possible LMULs.
def MxList {
list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
}
// Used for widening and narrowing instructions, so it doesn't contain M8.
def MxListW {
list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4];
}
// Used for zext/sext.vf2.
def MxListVF2 {
list<LMULInfo> m = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
}
// Used for zext/sext.vf4.
def MxListVF4 {
list<LMULInfo> m = [V_MF2, V_M1, V_M2, V_M4, V_M8];
}
// Used for zext/sext.vf8.
def MxListVF8 {
list<LMULInfo> m = [V_M1, V_M2, V_M4, V_M8];
}
class FPR_Info<RegisterClass regclass, string fx> {
RegisterClass fprclass = regclass;
string FX = fx;
}
def SCALAR_F16 : FPR_Info<FPR16, "F16">;
def SCALAR_F32 : FPR_Info<FPR32, "F32">;
def SCALAR_F64 : FPR_Info<FPR64, "F64">;
def FPList {
list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32, SCALAR_F64];
}
// Used for widening instructions. It excludes F64.
def FPListW {
list<FPR_Info> fpinfo = [SCALAR_F16, SCALAR_F32];
}
class MxSet<int eew> {
list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
!eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
!eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
!eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}
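// E.g. MxSet<32>.m = [V_MF2, V_M1, V_M2, V_M4, V_M8]; the fractional LMULs
// that would make SEW/LMUL exceed ELEN=64 are excluded.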
class NFSet<LMULInfo m> {
list<int> L = !cond(!eq(m.value, V_M8.value): [],
!eq(m.value, V_M4.value): [2],
!eq(m.value, V_M2.value): [2, 3, 4],
true: [2, 3, 4, 5, 6, 7, 8]);
}
class log2<int num> {
int val = !if(!eq(num, 1), 0, !add(1, log2<!srl(num, 1)>.val));
}
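// E.g. log2<1>.val = 0, log2<8>.val = 3, log2<64>.val = 6.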
class octuple_to_str<int octuple> {
string ret = !if(!eq(octuple, 1), "MF8",
!if(!eq(octuple, 2), "MF4",
!if(!eq(octuple, 4), "MF2",
!if(!eq(octuple, 8), "M1",
!if(!eq(octuple, 16), "M2",
!if(!eq(octuple, 32), "M4",
!if(!eq(octuple, 64), "M8",
"NoDef")))))));
}
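// E.g. octuple_to_str<4>.ret = "MF2" and octuple_to_str<32>.ret = "M4".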
def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
// Output pattern for X0 used to represent VLMAX in the pseudo instructions.
// We can't use the X0 register because the AVL operands use GPRNoX0.
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
// List of EEW.
defvar EEWList = [8, 16, 32, 64];
class SegRegClass<LMULInfo m, int nf> {
VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
!eq(m.value, V_MF4.value): V_M1.MX,
!eq(m.value, V_MF2.value): V_M1.MX,
true: m.MX));
}
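// E.g. SegRegClass<V_MF2, 2>.RC = VRN2M1 (fractional LMULs fall back to the
// M1 register class) and SegRegClass<V_M2, 3>.RC = VRN3M2.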
//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//
class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
{
ValueType Vector = Vec;
ValueType Mask = Mas;
int SEW = Sew;
int Log2SEW = log2<Sew>.val;
VReg RegClass = Reg;
LMULInfo LMul = M;
ValueType Scalar = Scal;
RegisterClass ScalarRegClass = ScalarReg;
// The pattern fragment which produces the AVL operand, representing the
// "natural" vector length for this type. For scalable vectors this is VLMax.
OutPatFrag AVL = VLMax;
string ScalarSuffix = !cond(!eq(Scal, XLenVT) : "X",
!eq(Scal, f16) : "F16",
!eq(Scal, f32) : "F32",
!eq(Scal, f64) : "F64");
}
class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
RegisterClass ScalarReg = GPR>
: VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
{
ValueType VectorM1 = VecM1;
}
defset list<VTypeInfo> AllVectors = {
defset list<VTypeInfo> AllIntegerVectors = {
defset list<VTypeInfo> NoGroupIntegerVectors = {
defset list<VTypeInfo> FractionalGroupIntegerVectors = {
def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
}
def VI8M1: VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;
def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
}
defset list<GroupVTypeInfo> GroupIntegerVectors = {
def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;
def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;
def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;
def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
}
}
defset list<VTypeInfo> AllFloatVectors = {
defset list<VTypeInfo> NoGroupFloatVectors = {
defset list<VTypeInfo> FractionalGroupFloatVectors = {
def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
def VF32MF2: VTypeInfo<vfloat32mf2_t,vbool64_t, 32, VR, V_MF2, f32, FPR32>;
}
def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, VR, V_M1, f16, FPR16>;
def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1, f32, FPR32>;
def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
}
defset list<GroupVTypeInfo> GroupFloatVectors = {
def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
VRM2, V_M2, f16, FPR16>;
def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
VRM4, V_M4, f16, FPR16>;
def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
VRM8, V_M8, f16, FPR16>;
def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
VRM2, V_M2, f32, FPR32>;
def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t, 32,
VRM4, V_M4, f32, FPR32>;
def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t, 32,
VRM8, V_M8, f32, FPR32>;
def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
VRM2, V_M2, f64, FPR64>;
def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
VRM4, V_M4, f64, FPR64>;
def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t, 64,
VRM8, V_M8, f64, FPR64>;
}
}
}
// This functor is used to obtain the integer vector type that has the same
// SEW and multiplier as the input parameter type.
class GetIntVTypeInfo<VTypeInfo vti>
{
// Equivalent integer vector type. E.g.:
// VI8M1 → VI8M1 (identity)
// VF64M4 → VI64M4
VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}
class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
ValueType Mask = Mas;
// {SEW, VLMul} values set a valid VType to deal with this mask type.
// We assume SEW=1 and set the corresponding LMUL. vsetvli insertion will
// look for SEW=1 to optimize based on surrounding instructions.
int SEW = 1;
int Log2SEW = 0;
LMULInfo LMul = M;
string BX = Bx; // Appendix of mask operations.
// The pattern fragment which produces the AVL operand, representing the
// "natural" vector length for this mask type. For scalable masks this is
// VLMax.
OutPatFrag AVL = VLMax;
}
defset list<MTypeInfo> AllMasks = {
// vbool<n>_t, with <n> = SEW/LMUL; we assume SEW=8 and the corresponding LMUL.
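// E.g. vbool64_t: n = 8 / (1/8) = 64, so it maps to V_MF8; vbool1_t:
// n = 8 / 8 = 1, so it maps to V_M8.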
def : MTypeInfo<vbool64_t, V_MF8, "B1">;
def : MTypeInfo<vbool32_t, V_MF4, "B2">;
def : MTypeInfo<vbool16_t, V_MF2, "B4">;
def : MTypeInfo<vbool8_t, V_M1, "B8">;
def : MTypeInfo<vbool4_t, V_M2, "B16">;
def : MTypeInfo<vbool2_t, V_M4, "B32">;
def : MTypeInfo<vbool1_t, V_M8, "B64">;
}
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
VTypeInfo Vti = vti;
VTypeInfo Wti = wti;
}
class VTypeInfoToFraction<VTypeInfo vti, VTypeInfo fti>
{
VTypeInfo Vti = vti;
VTypeInfo Fti = fti;
}
defset list<VTypeInfoToWide> AllWidenableIntVectors = {
def : VTypeInfoToWide<VI8MF8, VI16MF4>;
def : VTypeInfoToWide<VI8MF4, VI16MF2>;
def : VTypeInfoToWide<VI8MF2, VI16M1>;
def : VTypeInfoToWide<VI8M1, VI16M2>;
def : VTypeInfoToWide<VI8M2, VI16M4>;
def : VTypeInfoToWide<VI8M4, VI16M8>;
def : VTypeInfoToWide<VI16MF4, VI32MF2>;
def : VTypeInfoToWide<VI16MF2, VI32M1>;
def : VTypeInfoToWide<VI16M1, VI32M2>;
def : VTypeInfoToWide<VI16M2, VI32M4>;
def : VTypeInfoToWide<VI16M4, VI32M8>;
def : VTypeInfoToWide<VI32MF2, VI64M1>;
def : VTypeInfoToWide<VI32M1, VI64M2>;
def : VTypeInfoToWide<VI32M2, VI64M4>;
def : VTypeInfoToWide<VI32M4, VI64M8>;
}
defset list<VTypeInfoToWide> AllWidenableFloatVectors = {
def : VTypeInfoToWide<VF16MF4, VF32MF2>;
def : VTypeInfoToWide<VF16MF2, VF32M1>;
def : VTypeInfoToWide<VF16M1, VF32M2>;
def : VTypeInfoToWide<VF16M2, VF32M4>;
def : VTypeInfoToWide<VF16M4, VF32M8>;
def : VTypeInfoToWide<VF32MF2, VF64M1>;
def : VTypeInfoToWide<VF32M1, VF64M2>;
def : VTypeInfoToWide<VF32M2, VF64M4>;
def : VTypeInfoToWide<VF32M4, VF64M8>;
}
defset list<VTypeInfoToFraction> AllFractionableVF2IntVectors = {
def : VTypeInfoToFraction<VI16MF4, VI8MF8>;
def : VTypeInfoToFraction<VI16MF2, VI8MF4>;
def : VTypeInfoToFraction<VI16M1, VI8MF2>;
def : VTypeInfoToFraction<VI16M2, VI8M1>;
def : VTypeInfoToFraction<VI16M4, VI8M2>;
def : VTypeInfoToFraction<VI16M8, VI8M4>;
def : VTypeInfoToFraction<VI32MF2, VI16MF4>;
def : VTypeInfoToFraction<VI32M1, VI16MF2>;
def : VTypeInfoToFraction<VI32M2, VI16M1>;
def : VTypeInfoToFraction<VI32M4, VI16M2>;
def : VTypeInfoToFraction<VI32M8, VI16M4>;
def : VTypeInfoToFraction<VI64M1, VI32MF2>;
def : VTypeInfoToFraction<VI64M2, VI32M1>;
def : VTypeInfoToFraction<VI64M4, VI32M2>;
def : VTypeInfoToFraction<VI64M8, VI32M4>;
}
defset list<VTypeInfoToFraction> AllFractionableVF4IntVectors = {
def : VTypeInfoToFraction<VI32MF2, VI8MF8>;
def : VTypeInfoToFraction<VI32M1, VI8MF4>;
def : VTypeInfoToFraction<VI32M2, VI8MF2>;
def : VTypeInfoToFraction<VI32M4, VI8M1>;
def : VTypeInfoToFraction<VI32M8, VI8M2>;
def : VTypeInfoToFraction<VI64M1, VI16MF4>;
def : VTypeInfoToFraction<VI64M2, VI16MF2>;
def : VTypeInfoToFraction<VI64M4, VI16M1>;
def : VTypeInfoToFraction<VI64M8, VI16M2>;
}
defset list<VTypeInfoToFraction> AllFractionableVF8IntVectors = {
def : VTypeInfoToFraction<VI64M1, VI8MF8>;
def : VTypeInfoToFraction<VI64M2, VI8MF4>;
def : VTypeInfoToFraction<VI64M4, VI8MF2>;
def : VTypeInfoToFraction<VI64M8, VI8M1>;
}
defset list<VTypeInfoToWide> AllWidenableIntToFloatVectors = {
def : VTypeInfoToWide<VI8MF8, VF16MF4>;
def : VTypeInfoToWide<VI8MF4, VF16MF2>;
def : VTypeInfoToWide<VI8MF2, VF16M1>;
def : VTypeInfoToWide<VI8M1, VF16M2>;
def : VTypeInfoToWide<VI8M2, VF16M4>;
def : VTypeInfoToWide<VI8M4, VF16M8>;
def : VTypeInfoToWide<VI16MF4, VF32MF2>;
def : VTypeInfoToWide<VI16MF2, VF32M1>;
def : VTypeInfoToWide<VI16M1, VF32M2>;
def : VTypeInfoToWide<VI16M2, VF32M4>;
def : VTypeInfoToWide<VI16M4, VF32M8>;
def : VTypeInfoToWide<VI32MF2, VF64M1>;
def : VTypeInfoToWide<VI32M1, VF64M2>;
def : VTypeInfoToWide<VI32M2, VF64M4>;
def : VTypeInfoToWide<VI32M4, VF64M8>;
}
// This class holds the record of the RISCVVPseudosTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
// RISCVBaseInfo.h.
class CONST8b<bits<8> val> {
bits<8> V = val;
}
def InvalidIndex : CONST8b<0x80>;
class RISCVVPseudo {
Pseudo Pseudo = !cast<Pseudo>(NAME); // Used as a key.
Instruction BaseInstr;
}
// The actual table.
def RISCVVPseudosTable : GenericTable {
let FilterClass = "RISCVVPseudo";
let CppTypeName = "PseudoInfo";
let Fields = [ "Pseudo", "BaseInstr" ];
let PrimaryKey = [ "Pseudo" ];
let PrimaryKeyName = "getPseudoInfo";
let PrimaryKeyEarlyOut = true;
}
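// Note (illustrative): the searchable-tables backend is expected to emit this
// as a sorted PseudoInfo array plus a getPseudoInfo(Pseudo) lookup helper,
// guarded by GET_RISCVVPseudosTable_{DECL,IMPL} in the generated .inc file.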
def RISCVVIntrinsicsTable : GenericTable {
let FilterClass = "RISCVVIntrinsic";
let CppTypeName = "RISCVVIntrinsicInfo";
let Fields = ["IntrinsicID", "SplatOperand"];
let PrimaryKey = ["IntrinsicID"];
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
class RISCVVLE<bit M, bit Str, bit F, bits<3> S, bits<3> L> {
bits<1> Masked = M;
bits<1> Strided = Str;
bits<1> FF = F;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVLETable : GenericTable {
let FilterClass = "RISCVVLE";
let CppTypeName = "VLEPseudo";
let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVLEPseudo";
}
class RISCVVSE<bit M, bit Str, bits<3> S, bits<3> L> {
bits<1> Masked = M;
bits<1> Strided = Str;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVSETable : GenericTable {
let FilterClass = "RISCVVSE";
let CppTypeName = "VSEPseudo";
let Fields = ["Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
let PrimaryKey = ["Masked", "Strided", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVSEPseudo";
}
class RISCVVLX_VSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
bits<1> Masked = M;
bits<1> Ordered = O;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
class RISCVVLX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
RISCVVLX_VSX<M, O, S, L, IL>;
class RISCVVSX<bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> :
RISCVVLX_VSX<M, O, S, L, IL>;
class RISCVVLX_VSXTable : GenericTable {
let CppTypeName = "VLX_VSXPseudo";
let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
}
def RISCVVLXTable : RISCVVLX_VSXTable {
let FilterClass = "RISCVVLX";
let PrimaryKeyName = "getVLXPseudo";
}
def RISCVVSXTable : RISCVVLX_VSXTable {
let FilterClass = "RISCVVSX";
let PrimaryKeyName = "getVSXPseudo";
}
class RISCVVLSEG<bits<4> N, bit M, bit Str, bit F, bits<3> S, bits<3> L> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Strided = Str;
bits<1> FF = F;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVLSEGTable : GenericTable {
let FilterClass = "RISCVVLSEG";
let CppTypeName = "VLSEGPseudo";
let Fields = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Strided", "FF", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVLSEGPseudo";
}
class RISCVVLXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Ordered = O;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVLXSEGTable : GenericTable {
let FilterClass = "RISCVVLXSEG";
let CppTypeName = "VLXSEGPseudo";
let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
let PrimaryKeyName = "getVLXSEGPseudo";
}
class RISCVVSSEG<bits<4> N, bit M, bit Str, bits<3> S, bits<3> L> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Strided = Str;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVSSEGTable : GenericTable {
let FilterClass = "RISCVVSSEG";
let CppTypeName = "VSSEGPseudo";
let Fields = ["NF", "Masked", "Strided", "Log2SEW", "LMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Strided", "Log2SEW", "LMUL"];
let PrimaryKeyName = "getVSSEGPseudo";
}
class RISCVVSXSEG<bits<4> N, bit M, bit O, bits<3> S, bits<3> L, bits<3> IL> {
bits<4> NF = N;
bits<1> Masked = M;
bits<1> Ordered = O;
bits<3> Log2SEW = S;
bits<3> LMUL = L;
bits<3> IndexLMUL = IL;
Pseudo Pseudo = !cast<Pseudo>(NAME);
}
def RISCVVSXSEGTable : GenericTable {
let FilterClass = "RISCVVSXSEG";
let CppTypeName = "VSXSEGPseudo";
let Fields = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"];
let PrimaryKey = ["NF", "Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"];
let PrimaryKeyName = "getVSXSEGPseudo";
}
//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//
class PseudoToVInst<string PseudoInst> {
string VInst = !subst("_M8", "",
!subst("_M4", "",
!subst("_M2", "",
!subst("_M1", "",
!subst("_MF2", "",
!subst("_MF4", "",
!subst("_MF8", "",
!subst("_B1", "",
!subst("_B2", "",
!subst("_B4", "",
!subst("_B8", "",
!subst("_B16", "",
!subst("_B32", "",
!subst("_B64", "",
!subst("_MASK", "",
!subst("_TIED", "",
!subst("F16", "F",
!subst("F32", "F",
!subst("F64", "F",
!subst("Pseudo", "", PseudoInst))))))))))))))))))));
}
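// E.g. PseudoToVInst<"PseudoVADD_VV_M4_MASK">.VInst = "VADD_VV" and
// PseudoToVInst<"PseudoVFADD_VF16_M1">.VInst = "VFADD_VF".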
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
// of a reduction.
class GetVRegNoV0<VReg VRegClass> {
VReg R = !cond(!eq(VRegClass, VR) : VRNoV0,
!eq(VRegClass, VRM2) : VRM2NoV0,
!eq(VRegClass, VRM4) : VRM4NoV0,
!eq(VRegClass, VRM8) : VRM8NoV0,
!eq(VRegClass, VRN2M1) : VRN2M1NoV0,
!eq(VRegClass, VRN2M2) : VRN2M2NoV0,
!eq(VRegClass, VRN2M4) : VRN2M4NoV0,
!eq(VRegClass, VRN3M1) : VRN3M1NoV0,
!eq(VRegClass, VRN3M2) : VRN3M2NoV0,
!eq(VRegClass, VRN4M1) : VRN4M1NoV0,
!eq(VRegClass, VRN4M2) : VRN4M2NoV0,
!eq(VRegClass, VRN5M1) : VRN5M1NoV0,
!eq(VRegClass, VRN6M1) : VRN6M1NoV0,
!eq(VRegClass, VRN7M1) : VRN7M1NoV0,
!eq(VRegClass, VRN8M1) : VRN8M1NoV0,
true : VRegClass);
}
// Join strings in a list using a separator, ignoring empty elements.
class Join<list<string> strings, string separator> {
string ret = !foldl(!head(strings), !tail(strings), a, b,
!cond(
!and(!empty(a), !empty(b)) : "",
!empty(a) : b,
!empty(b) : a,
1 : a#separator#b));
}
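// E.g. Join<["@earlyclobber $rd", "$rd = $merge"], ",">.ret is
// "@earlyclobber $rd,$rd = $merge", while Join<["", "$rd = $merge"], ",">.ret
// is just "$rd = $merge".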
class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
Pseudo<outs, ins, []>, RISCVVPseudo {
let BaseInstr = instr;
let VLMul = m.value;
}
class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVLE</*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
RISCVVPseudo,
RISCVVLE</*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = "$rd = $merge";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSLoadNoMask<VReg RetClass, int EEW>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVLE</*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSLoadMask<VReg RetClass, int EEW>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1, GPR:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
RISCVVPseudo,
RISCVVLE</*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = "$rd = $merge";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered, bit EarlyClobber>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVLX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd", "");
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoILoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered, bit EarlyClobber>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
GPR:$rs1, IdxClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
RISCVVPseudo,
RISCVVLX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $merge", "$rd = $merge");
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSE</*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSStoreMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSE</*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSStoreNoMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSE</*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSStoreMask<VReg StClass, int EEW>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSE</*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
// Unary instruction that is never masked, so HasDummyMask=0.
class VPseudoUnaryNoDummyMask<VReg RetClass,
DAGOperand Op2Class> :
Pseudo<(outs RetClass:$rd),
(ins Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoNullaryNoMask<VReg RegClass>:
Pseudo<(outs RegClass:$rd),
(ins AVL:$vl, ixlenimm:$sew),
[]>, RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoNullaryMask<VReg RegClass>:
Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
(ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
ixlenimm:$sew), []>, RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints ="$rd = $merge";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
// Nullary pseudo instructions. They are expanded by the
// RISCVExpandPseudoInsts pass.
class VPseudoNullaryPseudoM<string BaseInst>
: Pseudo<(outs VR:$rd), (ins AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
// BaseInstr is not used by the RISCVExpandPseudoInsts pass.
// Just fill in a corresponding real v-inst to pass the tablegen check.
let BaseInstr = !cast<Instruction>(BaseInst);
}
// RetClass could be GPR or VReg.
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs RetClass:$rd),
(ins OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Constraint;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
// Mask unary operation without maskedoff.
class VPseudoMaskUnarySOutMask:
Pseudo<(outs GPR:$rd),
(ins VR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
// Mask can be any register V0-V31.
class VPseudoUnaryAnyMask<VReg RetClass,
VReg Op1Class> :
Pseudo<(outs RetClass:$rd),
(ins RetClass:$merge,
Op1Class:$rs2,
VR:$vm, AVL:$vl, ixlenimm:$sew),
[]>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = "@earlyclobber $rd, $rd = $merge";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs RetClass:$rd),
(ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Constraint;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoTiedBinaryNoMask<VReg RetClass,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs RetClass:$rd),
(ins RetClass:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $rs2"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let ForceTailAgnostic = 1;
let isConvertibleToThreeAddress = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSX</*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoIStoreMask<VReg StClass, VReg IdxClass, int EEW, bits<3> LMUL,
bit Ordered>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSX</*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoBinaryMask<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoBinaryMaskTA<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
// Like VPseudoBinaryMask, but the output can be V0.
class VPseudoBinaryMOutMask<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs RetClass:$rd),
(ins RetClass:$merge,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
// Special version of VPseudoBinaryMask where we pretend the first source is
// tied to the destination so we can work around the earlyclobber constraint.
// This allows maskedoff and rs2 to be the same register.
class VPseudoTiedBinaryMask<VReg RetClass,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 0; // Merge is also rs2.
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoBinaryCarryIn<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
bit CarryIn,
string Constraint> :
Pseudo<(outs RetClass:$rd),
!if(CarryIn,
(ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, AVL:$vl,
ixlenimm:$sew),
(ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew)), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Constraint;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 0;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
let VLMul = MInfo.value;
}
class VPseudoTernaryNoMask<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs RetClass:$rd),
(ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
AVL:$vl, ixlenimm:$sew),
[]>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs RetClass:$rd),
(ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),
[]>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $rs3"], ",">.ret;
let HasVecPolicyOp = 1;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoAMOWDNoMask<VReg RetClass,
VReg Op1Class> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
(ins GPR:$rs1,
Op1Class:$vs2,
GetVRegNoV0<RetClass>.R:$vd,
AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 1;
let mayStore = 1;
let hasSideEffects = 1;
let Constraints = "$vd_wd = $vd";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoAMOWDMask<VReg RetClass,
VReg Op1Class> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
(ins GPR:$rs1,
Op1Class:$vs2,
GetVRegNoV0<RetClass>.R:$vd,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 1;
let mayStore = 1;
let hasSideEffects = 1;
let Constraints = "$vd_wd = $vd";
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
multiclass VPseudoAMOEI<int eew> {
// Standard scalar AMO supports 32, 64, and 128 bits of memory data, and in
// the base vector "V" extension only SEW up to ELEN = max(XLEN, FLEN) is
// required to be supported; therefore only [32, 64] is allowed here.
foreach sew = [32, 64] in {
foreach lmul = MxSet<sew>.m in {
defvar octuple_lmul = lmul.octuple;
// Calculate emul = eew * lmul / sew
defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
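// E.g. eew=16, lmul=V_M2 (octuple 16), sew=32: octuple_emul =
// (16 * 16) >> 5 = 8, i.e. EMUL=M1.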
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emulMX = octuple_to_str<octuple_emul>.ret;
defvar emul= !cast<LMULInfo>("V_" # emulMX);
let VLMul = lmul.value in {
def "_WD_" # lmul.MX # "_" # emulMX : VPseudoAMOWDNoMask<lmul.vrclass, emul.vrclass>;
def "_WD_" # lmul.MX # "_" # emulMX # "_MASK" : VPseudoAMOWDMask<lmul.vrclass, emul.vrclass>;
}
}
}
}
}
multiclass VPseudoAMO {
foreach eew = EEWList in
defm "EI" # eew : VPseudoAMOEI<eew>;
}
class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = "$rd = $merge";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, GPR:$offset, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/0, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
GPR:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
ixlenimm:$policy),[]>,
RISCVVPseudo,
RISCVVLSEG<NF, /*Masked*/1, /*Strided*/1, /*FF*/0, log2<EEW>.val, VLMul> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = "$rd = $merge";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegLoadNoMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, IdxClass:$offset, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVLXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
// For vector indexed segment loads, the destination vector register groups
// cannot overlap the source vector register group.
let Constraints = "@earlyclobber $rd";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegLoadMask<VReg RetClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
IdxClass:$offset, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew,
ixlenimm:$policy),[]>,
RISCVVPseudo,
RISCVVLXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 1;
let mayStore = 0;
let hasSideEffects = 0;
// For vector indexed segment loads, the destination vector register groups
// cannot overlap the source vector register group.
let Constraints = "@earlyclobber $rd, $rd = $merge";
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSSEG<NF, /*Masked*/0, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSSEG<NF, /*Masked*/1, /*Strided*/0, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegStoreNoMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSSEG<NF, /*Masked*/0, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoSSegStoreMask<VReg ValClass, int EEW, bits<4> NF>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, GPR: $offset,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSSEG<NF, /*Masked*/1, /*Strided*/1, log2<EEW>.val, VLMul> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSXSEG<NF, /*Masked*/0, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, int EEW, bits<3> LMUL,
bits<4> NF, bit Ordered>:
Pseudo<(outs),
(ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
RISCVVSXSEG<NF, /*Masked*/1, Ordered, log2<EEW>.val, VLMul, LMUL> {
let mayLoad = 0;
let mayStore = 1;
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
multiclass VPseudoUSLoad<bit isFF> {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
defvar vreg = lmul.vrclass;
defvar FFStr = !if(isFF, "FF", "");
let VLMul = lmul.value in {
def "E" # eew # FFStr # "_V_" # LInfo :
VPseudoUSLoadNoMask<vreg, eew, isFF>;
def "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
VPseudoUSLoadMask<vreg, eew, isFF>;
}
}
}
}
multiclass VPseudoLoadMask {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>;
}
}
}
multiclass VPseudoSLoad {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
defvar vreg = lmul.vrclass;
let VLMul = lmul.value in {
def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask<vreg, eew>;
def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg, eew>;
}
}
}
}
multiclass VPseudoILoad<bit Ordered> {
foreach eew = EEWList in {
foreach sew = EEWList in {
foreach lmul = MxSet<sew>.m in {
defvar octuple_lmul = lmul.octuple;
// Calculate emul = eew * lmul / sew
defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar LInfo = lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
defvar Vreg = lmul.vrclass;
defvar IdxVreg = idx_lmul.vrclass;
defvar HasConstraint = !ne(sew, eew);
let VLMul = lmul.value in {
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
VPseudoILoadNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>;
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
VPseudoILoadMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered, HasConstraint>;
}
}
}
}
}
}
multiclass VPseudoUSStore {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
defvar vreg = lmul.vrclass;
let VLMul = lmul.value in {
def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask<vreg, eew>;
def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask<vreg, eew>;
}
}
}
}
multiclass VPseudoStoreMask {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
}
}
}
multiclass VPseudoSStore {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
defvar vreg = lmul.vrclass;
let VLMul = lmul.value in {
def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask<vreg, eew>;
def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg, eew>;
}
}
}
}
multiclass VPseudoIStore<bit Ordered> {
foreach eew = EEWList in {
foreach sew = EEWList in {
foreach lmul = MxSet<sew>.m in {
defvar octuple_lmul = lmul.octuple;
// Calculate emul = eew * lmul / sew
defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar LInfo = lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
defvar Vreg = lmul.vrclass;
defvar IdxVreg = idx_lmul.vrclass;
let VLMul = lmul.value in {
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo :
VPseudoIStoreNoMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" :
VPseudoIStoreMask<Vreg, IdxVreg, eew, idx_lmul.value, Ordered>;
}
}
}
}
}
}
multiclass VPseudoUnaryS_M {
foreach mti = AllMasks in
{
let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VPseudoUnaryNoMask<GPR, VR>;
def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask;
}
}
}
multiclass VPseudoUnaryM_M {
defvar constraint = "@earlyclobber $rd";
foreach mti = AllMasks in
{
let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>;
def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>;
}
}
}
multiclass VPseudoMaskNullaryV {
foreach m = MxList.m in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>;
def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>;
}
}
}
multiclass VPseudoNullaryPseudoM <string BaseInst> {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">;
}
}
}
multiclass VPseudoUnaryV_M {
defvar constraint = "@earlyclobber $rd";
foreach m = MxList.m in {
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>;
def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>;
}
}
}
multiclass VPseudoUnaryV_V_AnyMask {
foreach m = MxList.m in {
let VLMul = m.value in
def _VM # "_" # m.MX : VPseudoUnaryAnyMask<m.vrclass, m.vrclass>;
}
}
multiclass VPseudoBinary<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
string Constraint = ""> {
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
Constraint>;
}
}
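// Naming sketch (illustrative, assuming a user such as
// `defm PseudoVADD : VPseudoBinaryV_VV_VX_VI;`): reached through
// `defm _VV : VPseudoBinary<...>`, MInfo = V_M2 yields PseudoVADD_VV_M2 and
// PseudoVADD_VV_M2_MASK, the latter carrying the merge, mask and policy
// operands.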
multiclass VPseudoBinaryM<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
string Constraint = ""> {
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
Constraint>;
let ForceTailAgnostic = true in
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
Op2Class, Constraint>;
}
}
multiclass VPseudoBinaryEmul<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo lmul,
LMULInfo emul,
string Constraint = ""> {
let VLMul = lmul.value in {
def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
Constraint>;
def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
Constraint>;
}
}
multiclass VPseudoTiedBinary<VReg RetClass,
DAGOperand Op2Class,
LMULInfo MInfo,
string Constraint = ""> {
let VLMul = MInfo.value in {
def "_" # MInfo.MX # "_TIED": VPseudoTiedBinaryNoMask<RetClass, Op2Class,
Constraint>;
def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
Constraint>;
}
}
multiclass VPseudoBinaryV_VV<string Constraint = ""> {
foreach m = MxList.m in
defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}
multiclass VPseudoBinaryV_VV_EEW<int eew, string Constraint = ""> {
foreach m = MxList.m in {
foreach sew = EEWList in {
defvar octuple_lmul = m.octuple;
// emul = lmul * eew / sew
defvar octuple_emul = !srl(!mul(octuple_lmul, eew), log2<sew>.val);
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar emulMX = octuple_to_str<octuple_emul>.ret;
defvar emul = !cast<LMULInfo>("V_" # emulMX);
defm _VV : VPseudoBinaryEmul<m.vrclass, m.vrclass, emul.vrclass, m, emul, Constraint>;
}
}
}
}
multiclass VPseudoBinaryV_VX<string Constraint = ""> {
foreach m = MxList.m in
defm "_VX" : VPseudoBinary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
multiclass VPseudoBinaryV_VF<string Constraint = ""> {
foreach m = MxList.m in
foreach f = FPList.fpinfo in
defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
f.fprclass, m, Constraint>;
}
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
foreach m = MxList.m in
defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
multiclass VPseudoBinaryM_MM {
foreach m = MxList.m in
let VLMul = m.value in {
def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">;
}
}
// We use earlyclobber here because the spec only allows the destination to
// overlap a source register group in these cases:
// * The destination EEW is smaller than the source EEW and the overlap is
//   in the lowest-numbered part of the source register group. Otherwise, the
//   overlap is illegal.
// * The destination EEW is greater than the source EEW, the source EMUL is
//   at least 1, and the overlap is in the highest-numbered part of the
//   destination register group. Otherwise, the overlap is illegal.
multiclass VPseudoBinaryW_VV {
foreach m = MxListW.m in
defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
"@earlyclobber $rd">;
}
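// E.g. in VPseudoBinaryW_VV with m = V_M2 the destination class is
// m.wvrclass = VRM4 while both sources use VRM2, so without @earlyclobber the
// result group could be allocated on top of a source group.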
multiclass VPseudoBinaryW_VX {
foreach m = MxListW.m in
defm "_VX" : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_VF {
foreach m = MxListW.m in
foreach f = FPListW.fpinfo in
defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
f.fprclass, m,
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_WV {
foreach m = MxListW.m in {
defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
"@earlyclobber $rd">;
defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
"@earlyclobber $rd">;
}
}
multiclass VPseudoBinaryW_WX {
foreach m = MxListW.m in
defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m>;
}
multiclass VPseudoBinaryW_WF {
foreach m = MxListW.m in
foreach f = FPListW.fpinfo in
defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
f.fprclass, m>;
}
// Narrowing instructions like vnsrl/vnsra/vnclip(u) don't need @earlyclobber
// if the source and destination have an LMUL<=1. This matches the overlap
// exception from the spec:
// "The destination EEW is smaller than the source EEW and the overlap is in the
// lowest-numbered part of the source register group."
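// E.g. for m = V_MF2 the wide source class m.wvrclass is VR (LMUL=1), so no
// constraint is added, while for m = V_M1 the wide source group is VRM2 and
// the destination gets "@earlyclobber $rd".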
multiclass VPseudoBinaryV_WV {
foreach m = MxListW.m in
defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryV_WX {
foreach m = MxListW.m in
defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryV_WI {
foreach m = MxListW.m in
defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
!if(!ge(m.octuple, 8), "@earlyclobber $rd", "")>;
}
// For vadc and vsbc, the instruction encoding is reserved if the destination
// vector register is v0.
// For vadc and vsbc, CarryIn == 1 and CarryOut == 0.
multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1,
string Constraint = ""> {
foreach m = MxList.m in
def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
VPseudoBinaryCarryIn<!if(CarryOut, VR,
!if(!and(CarryIn, !not(CarryOut)),
GetVRegNoV0<m.vrclass>.R, m.vrclass)),
m.vrclass, m.vrclass, m, CarryIn, Constraint>;
}
multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1,
string Constraint = ""> {
foreach m = MxList.m in
def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
VPseudoBinaryCarryIn<!if(CarryOut, VR,
!if(!and(CarryIn, !not(CarryOut)),
GetVRegNoV0<m.vrclass>.R, m.vrclass)),
m.vrclass, GPR, m, CarryIn, Constraint>;
}
multiclass VPseudoBinaryV_FM {
foreach m = MxList.m in
foreach f = FPList.fpinfo in
def "_V" # f.FX # "M_" # m.MX :
VPseudoBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, f.fprclass, m, /*CarryIn=*/1, "">;
}
multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
string Constraint = ""> {
foreach m = MxList.m in
def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
VPseudoBinaryCarryIn<!if(CarryOut, VR,
!if(!and(CarryIn, !not(CarryOut)),
GetVRegNoV0<m.vrclass>.R, m.vrclass)),
m.vrclass, simm5, m, CarryIn, Constraint>;
}
multiclass VPseudoUnaryV_V_X_I_NoDummyMask {
foreach m = MxList.m in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, m.vrclass>;
def "_X_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, GPR>;
def "_I_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, simm5>;
}
}
}
multiclass VPseudoUnaryV_F_NoDummyMask {
foreach m = MxList.m in {
foreach f = FPList.fpinfo in {
let VLMul = m.value in {
def "_" # f.FX # "_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, f.fprclass>;
}
}
}
}
multiclass VPseudoUnaryTAV_V {
foreach m = MxList.m in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>;
}
}
}
multiclass VPseudoUnaryV_V {
foreach m = MxList.m in {
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>;
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>;
}
}
}
multiclass PseudoUnaryV_VF2 {
defvar constraints = "@earlyclobber $rd";
foreach m = MxListVF2.m in
{
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>;
def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.f2vrclass,
constraints>;
}
}
}
multiclass PseudoUnaryV_VF4 {
defvar constraints = "@earlyclobber $rd";
foreach m = MxListVF4.m in
{
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>;
def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.f4vrclass,
constraints>;
}
}
}
multiclass PseudoUnaryV_VF8 {
defvar constraints = "@earlyclobber $rd";
foreach m = MxListVF8.m in
{
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>;
def "_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.f8vrclass,
constraints>;
}
}
}
// The destination EEW is 1 since "For the purposes of register group overlap
// constraints, mask elements have EEW=1."
// The source EEW is 8, 16, 32, or 64.
// When the destination EEW is different from source EEW, we need to use
// @earlyclobber to avoid the overlap between destination and source registers.
// We don't need @earlyclobber for LMUL<=1 since that case matches the overlap
// exception from the spec:
// "The destination EEW is smaller than the source EEW and the overlap is in the
// lowest-numbered part of the source register group".
// With LMUL<=1 the source and dest occupy a single register so any overlap
// is in the lowest-numbered part.
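// E.g. VPseudoBinaryM_VV with m = V_M2 (octuple 16) compares VRM2 sources into
// a VR mask destination with "@earlyclobber $rd", while m = V_M1 (octuple 8)
// adds no constraint.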
multiclass VPseudoBinaryM_VV {
foreach m = MxList.m in
defm _VV : VPseudoBinaryM<VR, m.vrclass, m.vrclass, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryM_VX {
foreach m = MxList.m in
defm "_VX" :
VPseudoBinaryM<VR, m.vrclass, GPR, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryM_VF {
foreach m = MxList.m in
foreach f = FPList.fpinfo in
defm "_V" # f.FX :
VPseudoBinaryM<VR, m.vrclass, f.fprclass, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryM_VI {
foreach m = MxList.m in
defm _VI : VPseudoBinaryM<VR, m.vrclass, simm5, m,
!if(!ge(m.octuple, 16), "@earlyclobber $rd", "")>;
}
multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
defm "" : VPseudoBinaryV_VV<Constraint>;
defm "" : VPseudoBinaryV_VX<Constraint>;
defm "" : VPseudoBinaryV_VI<ImmType, Constraint>;
}
multiclass VPseudoBinaryV_VV_VX {
defm "" : VPseudoBinaryV_VV;
defm "" : VPseudoBinaryV_VX;
}
multiclass VPseudoBinaryV_VV_VF {
defm "" : VPseudoBinaryV_VV;
defm "" : VPseudoBinaryV_VF;
}
multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
defm "" : VPseudoBinaryV_VX;
defm "" : VPseudoBinaryV_VI<ImmType>;
}
multiclass VPseudoBinaryW_VV_VX {
defm "" : VPseudoBinaryW_VV;
defm "" : VPseudoBinaryW_VX;
}
multiclass VPseudoBinaryW_VV_VF {
defm "" : VPseudoBinaryW_VV;
defm "" : VPseudoBinaryW_VF;
}
multiclass VPseudoBinaryW_WV_WX {
defm "" : VPseudoBinaryW_WV;
defm "" : VPseudoBinaryW_WX;
}
multiclass VPseudoBinaryW_WV_WF {
defm "" : VPseudoBinaryW_WV;
defm "" : VPseudoBinaryW_WF;
}
multiclass VPseudoBinaryV_VM_XM_IM {
defm "" : VPseudoBinaryV_VM;
defm "" : VPseudoBinaryV_XM;
defm "" : VPseudoBinaryV_IM;
}
multiclass VPseudoBinaryV_VM_XM {
defm "" : VPseudoBinaryV_VM;
defm "" : VPseudoBinaryV_XM;
}
multiclass VPseudoBinaryM_VM_XM_IM<string Constraint> {
defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
}
multiclass VPseudoBinaryM_VM_XM<string Constraint> {
defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1, Constraint>;
}
multiclass VPseudoBinaryM_V_X_I<string Constraint> {
defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
}
multiclass VPseudoBinaryM_V_X<string Constraint> {
defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0, Constraint>;
}
multiclass VPseudoBinaryV_WV_WX_WI {
defm "" : VPseudoBinaryV_WV;
defm "" : VPseudoBinaryV_WX;
defm "" : VPseudoBinaryV_WI;
}
multiclass VPseudoTernary<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
string Constraint = ""> {
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
}
}
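// Same as VPseudoTernary, except that the unmasked pseudo carries a tail
// policy operand and the defs can be marked commutable.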
multiclass VPseudoTernaryWithPolicy<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
string Constraint = "",
bit Commutable = 0> {
let VLMul = MInfo.value in {
let isCommutable = Commutable in
def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
}
}
multiclass VPseudoTernaryV_VV_AAXA<string Constraint = ""> {
foreach m = MxList.m in {
defm _VV : VPseudoTernaryWithPolicy<m.vrclass, m.vrclass, m.vrclass, m,
Constraint, /*Commutable*/1>;
}
}
multiclass VPseudoTernaryV_VX<string Constraint = ""> {
foreach m = MxList.m in
defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
foreach m = MxList.m in
defm "_VX" : VPseudoTernaryWithPolicy<m.vrclass, GPR, m.vrclass, m,
Constraint, /*Commutable*/1>;
}
multiclass VPseudoTernaryV_VF_AAXA<string Constraint = ""> {
foreach m = MxList.m in
foreach f = FPList.fpinfo in
defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
m.vrclass, m, Constraint,
/*Commutable*/1>;
}
multiclass VPseudoTernaryW_VV {
defvar constraint = "@earlyclobber $rd";
foreach m = MxListW.m in
defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
constraint>;
}
multiclass VPseudoTernaryW_VX {
defvar constraint = "@earlyclobber $rd";
foreach m = MxListW.m in
defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
constraint>;
}
multiclass VPseudoTernaryW_VF {
defvar constraint = "@earlyclobber $rd";
foreach m = MxListW.m in
foreach f = FPListW.fpinfo in
defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
m.vrclass, m, constraint>;
}
multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
foreach m = MxList.m in
defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
multiclass VPseudoTernaryV_VV_VX_AAXA<string Constraint = ""> {
defm "" : VPseudoTernaryV_VV_AAXA<Constraint>;
defm "" : VPseudoTernaryV_VX_AAXA<Constraint>;
}
multiclass VPseudoTernaryV_VV_VF_AAXA<string Constraint = ""> {
defm "" : VPseudoTernaryV_VV_AAXA<Constraint>;
defm "" : VPseudoTernaryV_VF_AAXA<Constraint>;
}
multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
defm "" : VPseudoTernaryV_VX<Constraint>;
defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
}
multiclass VPseudoTernaryW_VV_VX {
defm "" : VPseudoTernaryW_VV;
defm "" : VPseudoTernaryW_VX;
}
multiclass VPseudoTernaryW_VV_VF {
defm "" : VPseudoTernaryW_VV;
defm "" : VPseudoTernaryW_VF;
}
multiclass VPseudoBinaryM_VV_VX_VI {
defm "" : VPseudoBinaryM_VV;
defm "" : VPseudoBinaryM_VX;
defm "" : VPseudoBinaryM_VI;
}
multiclass VPseudoBinaryM_VV_VX {
defm "" : VPseudoBinaryM_VV;
defm "" : VPseudoBinaryM_VX;
}
multiclass VPseudoBinaryM_VV_VF {
defm "" : VPseudoBinaryM_VV;
defm "" : VPseudoBinaryM_VF;
}
multiclass VPseudoBinaryM_VX_VI {
defm "" : VPseudoBinaryM_VX;
defm "" : VPseudoBinaryM_VI;
}
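// Reduction pseudos: the result and the scalar operand (operand 2) always use
// the LMUL=1 register class (V_M1.vrclass), regardless of the LMUL of the
// vector source.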
multiclass VPseudoReductionV_VS {
foreach m = MxList.m in {
defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>;
}
}
multiclass VPseudoConversion<VReg RetClass,
VReg Op1Class,
LMULInfo MInfo,
string Constraint = ""> {
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA<RetClass, Op1Class,
Constraint>;
}
}
multiclass VPseudoConversionV_V {
foreach m = MxList.m in
defm _V : VPseudoConversion<m.vrclass, m.vrclass, m>;
}
multiclass VPseudoConversionW_V {
defvar constraint = "@earlyclobber $rd";
foreach m = MxListW.m in
defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>;
}
multiclass VPseudoConversionV_W {
defvar constraint = "@earlyclobber $rd";
foreach m = MxListW.m in
defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>;
}
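// Segment load/store pseudos: "USSeg" is unit-stride, "SSeg" is strided and
// "ISeg" is indexed (Ordered selects the ordered form). isFF selects the
// fault-only-first unit-stride variants.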
multiclass VPseudoUSSegLoad<bit isFF> {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
defvar FFStr = !if(isFF, "FF", "");
def nf # "E" # eew # FFStr # "_V_" # LInfo :
VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>;
def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
VPseudoUSSegLoadMask<vreg, eew, nf, isFF>;
}
}
}
}
}
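// For example, instantiating the multiclass above with nf=3, eew=16 and
// LMUL=M1 produces defs named 3E16_V_M1 and 3E16_V_M1_MASK (prefixed by the
// enclosing defm name), with "FF" inserted after the EEW when isFF is set.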
multiclass VPseudoSSegLoad {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew, nf>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew, nf>;
}
}
}
}
}
multiclass VPseudoISegLoad<bit Ordered> {
foreach idx_eew = EEWList in {
foreach sew = EEWList in {
foreach val_lmul = MxSet<sew>.m in {
defvar octuple_lmul = val_lmul.octuple;
// Calculate emul = eew * lmul / sew. The value is kept in octuple form
// (octuple_emul is eight times EMUL) so that fractional EMULs stay integral.
defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
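// For example, idx_eew=16 with LMUL=4 (octuple_lmul=32) and SEW=32 gives
// octuple_emul = (16 * 32) >> 5 = 16, i.e. EMUL=2.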
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar ValLInfo = val_lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
defvar Vreg = val_lmul.vrclass;
defvar IdxVreg = idx_lmul.vrclass;
let VLMul = val_lmul.value in {
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
VPseudoISegLoadNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
nf, Ordered>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
VPseudoISegLoadMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
nf, Ordered>;
}
}
}
}
}
}
}
multiclass VPseudoUSSegStore {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew, nf>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew, nf>;
}
}
}
}
}
multiclass VPseudoSSegStore {
foreach eew = EEWList in {
foreach lmul = MxSet<eew>.m in {
defvar LInfo = lmul.MX;
let VLMul = lmul.value in {
foreach nf = NFSet<lmul>.L in {
defvar vreg = SegRegClass<lmul, nf>.RC;
def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew, nf>;
def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew, nf>;
}
}
}
}
}
multiclass VPseudoISegStore<bit Ordered> {
foreach idx_eew = EEWList in {
foreach sew = EEWList in {
foreach val_lmul = MxSet<sew>.m in {
defvar octuple_lmul = val_lmul.octuple;
// Calculate emul = eew * lmul / sew. The value is kept in octuple form
// (octuple_emul is eight times EMUL) so that fractional EMULs stay integral.
defvar octuple_emul = !srl(!mul(idx_eew, octuple_lmul), log2<sew>.val);
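// The range check below keeps EMUL between 1/8 and 8.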
if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
defvar ValLInfo = val_lmul.MX;
defvar IdxLInfo = octuple_to_str<octuple_emul>.ret;
defvar idx_lmul = !cast<LMULInfo>("V_" # IdxLInfo);
defvar Vreg = val_lmul.vrclass;
defvar IdxVreg = idx_lmul.vrclass;
let VLMul = val_lmul.value in {
foreach nf = NFSet<val_lmul>.L in {
defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
nf, Ordered>;
def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value,
nf, Ordered>;
}
}
}
}
}
}
}
//===----------------------------------------------------------------------===//
// Helpers to define the intrinsic patterns.
//===----------------------------------------------------------------------===//
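// Each VPat* class matches one intrinsic call (the "_mask" flavor for masked
// variants) and selects the corresponding pseudo; most build the pseudo name
// from the base mnemonic, operand kind and LMUL, e.g.
// inst#"_"#kind#"_"#vlmul.MX, appending "_MASK" for the masked form.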
class VPatUnaryNoMask<string intrinsic_name,
string inst,
string kind,
ValueType result_type,
ValueType op2_type,
int sew,
LMULInfo vlmul,
VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(op2_type op2_reg_class:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(op2_type op2_reg_class:$rs2),
GPR:$vl, sew)>;
class VPatUnaryMask<string intrinsic_name,
string inst,
string kind,
ValueType result_type,
ValueType op2_type,
ValueType mask_type,
int sew,
LMULInfo vlmul,
VReg result_reg_class,
VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
(mask_type V0), GPR:$vl, sew)>;
class VPatUnaryMaskTA<string intrinsic_name,
string inst,
string kind,
ValueType result_type,
ValueType op2_type,
ValueType mask_type,
int sew,
LMULInfo vlmul,
VReg result_reg_class,
VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
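// Note: unlike VPatUnaryMask above, the TA variant also forwards a tail-policy
// immediate operand to the masked pseudo.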
class VPatMaskUnaryNoMask<string intrinsic_name,
string inst,
MTypeInfo mti> :
Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name)
(mti.Mask VR:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX)
(mti.Mask VR:$rs2),
GPR:$vl, mti.Log2SEW)>;
class VPatMaskUnaryMask<string intrinsic_name,
string inst,
MTypeInfo mti> :
Pat<(mti.Mask (!cast<Intrinsic>(intrinsic_name#"_mask")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
(mti.Mask V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
(mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
class VPatUnaryAnyMask<string intrinsic,
string inst,
string kind,
ValueType result_type,
ValueType op1_type,
ValueType mask_type,
int sew,
LMULInfo vlmul,
VReg result_reg_class,
VReg op1_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic)
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(mask_type VR:$rs2),
GPR:$vl, sew)>;
class VPatBinaryNoMask<string intrinsic_name,
string inst,
ValueType result_type,
ValueType op1_type,
ValueType op2_type,
int sew,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
GPR:$vl, sew)>;
// Same as above but source operands are swapped.
class VPatBinaryNoMaskSwapped<string intrinsic_name,
string inst,
ValueType result_type,
ValueType op1_type,
ValueType op2_type,
int sew,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
VLOpFrag)),
(!cast<Instruction>(inst)
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
GPR:$vl, sew)>;
class VPatBinaryMask<string intrinsic_name,
string inst,
ValueType result_type,
ValueType op1_type,
ValueType op2_type,
ValueType mask_type,
int sew,
VReg result_reg_class,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
class VPatBinaryMaskTA<string intrinsic_name,
string inst,
ValueType result_type,
ValueType op1_type,
ValueType op2_type,
ValueType mask_type,
int sew,
VReg result_reg_class,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
// Same as above but source operands are swapped.
class VPatBinaryMaskSwapped<string intrinsic_name,
string inst,
ValueType result_type,
ValueType op1_type,
ValueType op2_type,
ValueType mask_type,
int sew,
VReg result_reg_class,
VReg op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
class VPatTiedBinaryNoMask<string intrinsic_name,
string inst,
ValueType result_type,
ValueType op2_type,
int sew,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(result_type result_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_TIED")
(result_type result_reg_class:$rs1),
(op2_type op2_kind:$rs2),
GPR:$vl, sew)>;
class VPatTiedBinaryMask<string intrinsic_name,
string inst,
ValueType result_type,
ValueType op2_type,
ValueType mask_type,
int sew,
VReg result_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
(mask_type V0),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK_TIED")
(result_type result_reg_class:$merge),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatTernaryNoMask<string intrinsic,
string inst,
string kind,
ValueType result_type,
ValueType op1_type,
ValueType op2_type,
int sew,
LMULInfo vlmul,
VReg result_reg_class,
RegisterClass op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic)
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
GPR:$vl, sew)>;
class VPatTernaryNoMaskWithPolicy<string intrinsic,
string inst,
string kind,
ValueType result_type,
ValueType op1_type,
ValueType op2_type,
int sew,
LMULInfo vlmul,
VReg result_reg_class,
RegisterClass op1_reg_class,
DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic)
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),