[LLVM][XTHeadVector] Implement 16.1 `vmand{n}/vmnand/vmxor/vmor{n}/vmnor/vmxnor` (#95)

* [LLVM][XTHeadVector] Implement 16.1 `vmand/vmnand/vmandn/vmxor/vmor/vmnor/vmorn/vmxnor`

* [LLVM][XTHeadVector] Test 16.1 `vmand/vmnand/vmandn/vmxor/vmor/vmnor/vmorn/vmxnor`

* [LLVM][XTHeadVector] Implement 16.1 `vmclr/vmset`

* [LLVM][XTHeadVector] Test 16.1 `vmclr/vmset`
imkiva authored Apr 11, 2024
1 parent a547f9d commit 46ecdfa
Showing 13 changed files with 1,306 additions and 1 deletion.
13 changes: 13 additions & 0 deletions llvm/include/llvm/IR/IntrinsicsRISCVXTHeadV.td
@@ -897,4 +897,17 @@ let TargetPrefix = "riscv" in {
defm th_vfmul : XVBinaryAAXRoundingMode;
defm th_vfdiv : XVBinaryAAXRoundingMode;
defm th_vfrdiv : XVBinaryAAXRoundingMode;

// 16.1. Vector Mask-Register Logical Operations
def int_riscv_th_vmand: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmnand: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmandnot: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmxor: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmor: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmnor: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmornot: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmxnor: RISCVBinaryAAAUnMasked;
def int_riscv_th_vmclr : RISCVNullaryIntrinsic;
def int_riscv_th_vmset : RISCVNullaryIntrinsic;

} // TargetPrefix = "riscv"
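
At the IR level, the binary mask intrinsics take two mask operands plus vl, while vmclr/vmset take only vl. The vmclr/vmset tests are not included in this excerpt, so the following signatures are an assumption, modeled on the vmand tests below (iXLen follows the tests' sed convention and becomes i32 or i64 per target):

; Assumed shape of the nullary mask intrinsics (not shown in this excerpt)
declare <vscale x 8 x i1> @llvm.riscv.th.vmclr.nxv8i1(iXLen)
declare <vscale x 8 x i1> @llvm.riscv.th.vmset.nxv8i1(iXLen)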
12 changes: 12 additions & 0 deletions llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -146,6 +146,18 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
case RISCV::PseudoVMSET_M_B64:
// vmset.m vd => vmxnor.mm vd, vd, vd
return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM);
case RISCV::PseudoTH_VMCLR_M_B8:
case RISCV::PseudoTH_VMCLR_M_B16:
case RISCV::PseudoTH_VMCLR_M_B32:
case RISCV::PseudoTH_VMCLR_M_B64:
// th.vmclr.m vd => th.vmxor.mm vd, vd, vd
return expandVMSET_VMCLR(MBB, MBBI, RISCV::TH_VMXOR_MM);
case RISCV::PseudoTH_VMSET_M_B8:
case RISCV::PseudoTH_VMSET_M_B16:
case RISCV::PseudoTH_VMSET_M_B32:
case RISCV::PseudoTH_VMSET_M_B64:
// th.vmset.m vd => th.vmxnor.mm vd, vd, vd
return expandVMSET_VMCLR(MBB, MBBI, RISCV::TH_VMXNOR_MM);
}

return false;
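
Sketching the end-to-end effect (a hypothetical test, not taken from this commit): a vmset call should select PseudoTH_VMSET_M_B8, which this pass then rewrites into the xnor-with-itself idiom shown in the comments above. The exact vsetvli shape is an assumption based on the vmand tests below.

define <vscale x 8 x i1> @sketch_vmset(iXLen %vl) nounwind {
  ; expected lowering: th.vsetvli zero, a0, e8, m1, d1
  ;                    th.vmxnor.mm v0, v0, v0
  %a = call <vscale x 8 x i1> @llvm.riscv.th.vmset.nxv8i1(iXLen %vl)
  ret <vscale x 8 x i1> %a
}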
87 changes: 86 additions & 1 deletion llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td
@@ -124,6 +124,17 @@ defset list<VTypeInfoToWide> AllWidenableFloatXVectors = {
def : VTypeInfoToWide<VF32M4, VF64M8>;
}

// Redefine `AllMasks` from RISCVInstrInfoVPseudos.td, dropping the fractional-LMUL register groups.
// TODO: riscv-v-intrinsics.pdf declares functions accepting vbool<16,32,64>_t, but those map to
// MF2, MF4, MF8, which the 'V' extension 0.7.1 does not support.
defset list<MTypeInfo> AllXMasks = {
// vbool<n>_t, where <n> = SEW/LMUL; we assume SEW=8 and pick the corresponding LMUL.
def : MTypeInfo<vbool8_t, V_M1, "B8">;
def : MTypeInfo<vbool4_t, V_M2, "B16">;
def : MTypeInfo<vbool2_t, V_M4, "B32">;
def : MTypeInfo<vbool1_t, V_M8, "B64">;
}
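
For orientation, the four AllXMasks entries line up with the scalable mask types exercised by the vmand tests in this commit (th.vmand shown; iXLen follows the tests' sed convention):

declare <vscale x 8 x i1>  @llvm.riscv.th.vmand.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, iXLen)    ; vbool8_t, LMUL=1, "B8"
declare <vscale x 16 x i1> @llvm.riscv.th.vmand.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, iXLen) ; vbool4_t, LMUL=2, "B16"
declare <vscale x 32 x i1> @llvm.riscv.th.vmand.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, iXLen) ; vbool2_t, LMUL=4, "B32"
declare <vscale x 64 x i1> @llvm.riscv.th.vmand.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, iXLen) ; vbool1_t, LMUL=8, "B64"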

class GetXVTypePredicates<VTypeInfo vti> {
// TODO: distinguish different types (like F16, F32, F64, AnyF)? Is it needed?
list<Predicate> Predicates = !cond(!eq(vti.Scalar, f16) : [HasVendorXTHeadV],
@@ -1666,6 +1677,19 @@ class XVPseudoBinaryMaskNoPolicy<VReg RetClass,
let HasSEWOp = 1;
}

multiclass XVPseudoNullaryPseudoM <string BaseInst> {
foreach mti = AllXMasks in {
defvar mx = mti.LMul.MX;
defvar WriteVMALUV_MX = !cast<SchedWrite>("WriteVMALUV_" # mx);
defvar ReadVMALUV_MX = !cast<SchedRead>("ReadVMALUV_" # mx);

let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">,
Sched<[WriteVMALUV_MX, ReadVMALUV_MX, ReadVMALUV_MX]>;
}
}
}
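
For reference: with the AllXMasks entries above, this multiclass instantiates PseudoTH_VMCLR_M_B8/_B16/_B32/_B64 and PseudoTH_VMSET_M_B8/_B16/_B32/_B64, which are precisely the opcodes rewritten by the expansion hook in RISCVExpandPseudoInsts.cpp above.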

class XVPseudoUnaryNoMask<DAGOperand RetClass, DAGOperand OpClass,
string Constraint = ""> :
Pseudo<(outs RetClass:$rd),
@@ -2480,6 +2504,19 @@ multiclass XVPseudoVFRDIV_VF_RM {
}
}

multiclass XVPseudoVALU_MM {
foreach m = MxListXTHeadV in {
defvar mx = m.MX;
defvar WriteVMALUV_MX = !cast<SchedWrite>("WriteVMALUV_" # mx);
defvar ReadVMALUV_MX = !cast<SchedRead>("ReadVMALUV_" # mx);

let VLMul = m.value in {
def "_MM_" # mx : VPseudoBinaryNoMask<VR, VR, VR, "">,
Sched<[WriteVMALUV_MX, ReadVMALUV_MX, ReadVMALUV_MX]>;
}
}
}
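
Assuming MxListXTHeadV covers the whole-register groups M1/M2/M4/M8 (its definition is not shown in this diff), each defm below produces pseudos named PseudoTH_VMAND_MM_M1 through PseudoTH_VMAND_MM_M8 and so on, matching the "_MM_" # mti.LMul.MX references in XVPatBinaryM_MM below.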

//===----------------------------------------------------------------------===//
// Helpers to define the intrinsic patterns for the XTHeadVector extension.
//===----------------------------------------------------------------------===//
@@ -2943,6 +2980,21 @@ multiclass XVPatBinaryM_VI<string intrinsic, string instruction,
vti.RegClass, simm5>;
}

multiclass XVPatBinaryM_MM<string intrinsic, string instruction> {
foreach mti = AllXMasks in
def : VPatBinaryM<intrinsic, instruction # "_MM_" # mti.LMul.MX,
mti.Mask, mti.Mask, mti.Mask,
mti.Log2SEW, VR, VR>;
}
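
(VPatBinaryM is shared infrastructure from RISCVInstrInfoVPseudos.td; as used here it should match an unmasked intrinsic call on two mask operands plus vl against the corresponding "_MM_" pseudo at the mask type's Log2SEW.)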

multiclass XVPatNullaryM<string intrinsic, string inst> {
foreach mti = AllXMasks in
def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
VLOpFrag)),
(!cast<Instruction>(inst#"_M_"#mti.BX)
GPR:$vl, mti.Log2SEW)>;
}

multiclass XVPatCompare_VI<string intrinsic, string inst,
ImmLeaf ImmType> {
foreach vti = AllIntegerXVectors in {
@@ -3818,12 +3870,45 @@ let Predicates = [HasVendorXTHeadV], mayRaiseFPException = true,
} // Predicates = [HasVendorXTHeadV]

let Predicates = [HasVendorXTHeadV] in {
defm : XVPatBinaryV_VV_VX_RM<"int_riscv_th_vfmul", "PseudoTH_VFMUL",
AllFloatXVectors>;
defm : XVPatBinaryV_VV_VX_RM<"int_riscv_th_vfdiv", "PseudoTH_VFDIV",
AllFloatXVectors, isSEWAware=1>;
defm : XVPatBinaryV_VX_RM<"int_riscv_th_vfrdiv", "PseudoTH_VFRDIV",
AllFloatXVectors, isSEWAware=1>;
} // Predicates = [HasVendorXTHeadV]

//===----------------------------------------------------------------------===//
// 16.1. Vector Mask-Register Logical Operations
//===----------------------------------------------------------------------===//

defm PseudoTH_VMAND: XVPseudoVALU_MM;
defm PseudoTH_VMNAND: XVPseudoVALU_MM;
defm PseudoTH_VMANDN: XVPseudoVALU_MM;
defm PseudoTH_VMXOR: XVPseudoVALU_MM;
defm PseudoTH_VMOR: XVPseudoVALU_MM;
defm PseudoTH_VMNOR: XVPseudoVALU_MM;
defm PseudoTH_VMORN: XVPseudoVALU_MM;
defm PseudoTH_VMXNOR: XVPseudoVALU_MM;

// Pseudo instructions, processed by the RISCVExpandPseudoInsts pass.
defm PseudoTH_VMCLR : XVPseudoNullaryPseudoM<"TH_VMXOR">;
defm PseudoTH_VMSET : XVPseudoNullaryPseudoM<"TH_VMXNOR">;

// Patterns
let Predicates = [HasVendorXTHeadV] in {
defm : XVPatBinaryM_MM<"int_riscv_th_vmand", "PseudoTH_VMAND">;
defm : XVPatBinaryM_MM<"int_riscv_th_vmnand", "PseudoTH_VMNAND">;
defm : XVPatBinaryM_MM<"int_riscv_th_vmandnot", "PseudoTH_VMANDN">;
defm : XVPatBinaryM_MM<"int_riscv_th_vmxor", "PseudoTH_VMXOR">;
defm : XVPatBinaryM_MM<"int_riscv_th_vmor", "PseudoTH_VMOR">;
defm : XVPatBinaryM_MM<"int_riscv_th_vmnor", "PseudoTH_VMNOR">;
defm : XVPatBinaryM_MM<"int_riscv_th_vmornot", "PseudoTH_VMORN">;
defm : XVPatBinaryM_MM<"int_riscv_th_vmxnor", "PseudoTH_VMXNOR">;

// Patterns for the vmclr/vmset pseudo instructions
defm : XVPatNullaryM<"int_riscv_th_vmclr", "PseudoTH_VMCLR">;
defm : XVPatNullaryM<"int_riscv_th_vmset", "PseudoTH_VMSET">;
}
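
As a usage sketch (not part of this commit): the intrinsics compose into the usual mask idioms. For example, a & ~b can be written as th.vmnand of b with itself (the classic vmnot idiom) followed by th.vmand; that is what the single th.vmandn instruction, exposed as int_riscv_th_vmandnot, computes in one step.

define <vscale x 8 x i1> @sketch_andnot(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, iXLen %vl) nounwind {
  ; %notb = ~b for the first vl elements (vmnand with both operands equal)
  %notb = call <vscale x 8 x i1> @llvm.riscv.th.vmnand.nxv8i1(<vscale x 8 x i1> %b, <vscale x 8 x i1> %b, iXLen %vl)
  ; %r = a & ~b; th.vmandn.mm would compute this in a single instruction
  %r = call <vscale x 8 x i1> @llvm.riscv.th.vmand.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %notb, iXLen %vl)
  ret <vscale x 8 x i1> %r
}
declare <vscale x 8 x i1> @llvm.riscv.th.vmnand.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, iXLen)
declare <vscale x 8 x i1> @llvm.riscv.th.vmand.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, iXLen)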

include "RISCVInstrInfoXTHeadVVLPatterns.td"
129 changes: 129 additions & 0 deletions llvm/test/CodeGen/RISCV/rvv0p71/vmand.ll
@@ -0,0 +1,129 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xtheadvector \
; RUN: -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \
; RUN: -verify-machineinstrs | FileCheck %s

declare <vscale x 8 x i1> @llvm.riscv.th.vmand.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
iXLen);

define <vscale x 8 x i1> @intrinsic_vmand_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1
; CHECK-NEXT: th.vmand.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.th.vmand.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
iXLen %2)

ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.th.vmand.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
iXLen);

define <vscale x 16 x i1> @intrinsic_vmand_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1
; CHECK-NEXT: th.vmand.mm v0, v0, v8
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: csrr a1, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a0, a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.th.vmand.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
iXLen %2)

ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.th.vmand.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
iXLen);

define <vscale x 32 x i1> @intrinsic_vmand_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1
; CHECK-NEXT: th.vmand.mm v0, v0, v8
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: csrr a1, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a0, a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.th.vmand.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
iXLen %2)

ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.th.vmand.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen);

define <vscale x 64 x i1> @intrinsic_vmand_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: csrr a1, vl
; CHECK-NEXT: csrr a2, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a1, a2
; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1
; CHECK-NEXT: th.vmand.mm v0, v0, v8
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: csrr a1, vtype
; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1
; CHECK-NEXT: th.vsetvl zero, a0, a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.th.vmand.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
iXLen %2)

ret <vscale x 64 x i1> %a
}