From 70442b7864519e5c4ded96863e0def90dc445d84 Mon Sep 17 00:00:00 2001
From: imkiva
Date: Sun, 28 Apr 2024 19:30:30 +0800
Subject: [PATCH] [LLVM][XTHeadVector] Fix `vslide1` tests for rv32

---
 .../CodeGen/RISCV/rvv0p71/vslide1down-rv32.ll | 306 +++-------------
 .../CodeGen/RISCV/rvv0p71/vslide1up-rv32.ll   | 330 ++++--------------
 2 files changed, 108 insertions(+), 528 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vslide1down-rv32.ll
index d5a2752a2e338e..fd2428f38bbc9c 100644
--- a/llvm/test/CodeGen/RISCV/rvv0p71/vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv0p71/vslide1down-rv32.ll
@@ -11,8 +11,8 @@ declare @llvm.riscv.th.vslide1down.nxv8i8.i8(
 define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1
+; CHECK-NEXT: th.vslide1down.vx v8, v8, a0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.th.vslide1down.nxv8i8.i8(
@@ -34,8 +34,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv8i8.i8(
 define @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t
+; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1
+; CHECK-NEXT: th.vslide1down.vx v8, v9, a0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.th.vslide1down.mask.nxv8i8.i8(
@@ -57,8 +57,8 @@ declare @llvm.riscv.th.vslide1down.nxv16i8.i8(
 define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1
+; CHECK-NEXT: th.vslide1down.vx v8, v8, a0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.th.vslide1down.nxv16i8.i8(
@@ -80,8 +80,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv16i8.i8(
 define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t
+; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1
+; CHECK-NEXT: th.vslide1down.vx v8, v10, a0, v0.t
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.th.vslide1down.mask.nxv16i8.i8(
@@ -103,8 +103,8 @@ declare @llvm.riscv.th.vslide1down.nxv32i8.i8(
 define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
+; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1
+; CHECK-NEXT: th.vslide1down.vx v8, v8, a0
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.th.vslide1down.nxv32i8.i8(
@@ -126,8 +126,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv32i8.i8(
 define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t
+; CHECK-NEXT:
th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv32i8.i8( @@ -149,8 +149,8 @@ declare @llvm.riscv.th.vslide1down.nxv64i8.i8( define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv64i8.i8( @@ -172,8 +172,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv64i8.i8( define @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv64i8.i8( @@ -195,8 +195,8 @@ declare @llvm.riscv.th.vslide1down.nxv4i16.i16( define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv4i16.i16( @@ -218,8 +218,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv4i16.i16( define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv4i16.i16( @@ -241,8 +241,8 @@ declare @llvm.riscv.th.vslide1down.nxv8i16.i16( define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv8i16.i16( @@ -264,8 +264,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv8i16.i16( define @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv8i16.i16( @@ -287,8 +287,8 @@ declare @llvm.riscv.th.vslide1down.nxv16i16.i16( define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma 
-; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv16i16.i16( @@ -310,8 +310,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv16i16.i16( define @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv16i16.i16( @@ -333,8 +333,8 @@ declare @llvm.riscv.th.vslide1down.nxv32i16.i16( define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv32i16.i16( @@ -356,8 +356,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv32i16.i16( define @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv32i16.i16( @@ -379,8 +379,8 @@ declare @llvm.riscv.th.vslide1down.nxv2i32.i32( define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv2i32.i32( @@ -402,8 +402,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv2i32.i32( define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v9, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv2i32.i32( @@ -425,8 +425,8 @@ declare @llvm.riscv.th.vslide1down.nxv4i32.i32( define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv4i32.i32( @@ -448,8 +448,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv4i32.i32( define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v10, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv4i32.i32( @@ -471,8 +471,8 @@ declare @llvm.riscv.th.vslide1down.nxv8i32.i32( define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv8i32.i32( @@ -494,8 +494,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv8i32.i32( define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v12, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv8i32.i32( @@ -517,8 +517,8 @@ declare @llvm.riscv.th.vslide1down.nxv16i32.i32( define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.nxv16i32.i32( @@ -540,8 +540,8 @@ declare @llvm.riscv.th.vslide1down.mask.nxv16i32.i32( define @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vslide1down.vx v8, v16, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vslide1down.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1down.mask.nxv16i32.i32( @@ -559,213 +559,3 @@ declare @llvm.riscv.th.vslide1down.nxv1i64.i64( , i64, i32); - -define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 -; CHECK-NEXT: vslide1down.vx v8, v8, a1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.nxv1i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1down.mask.nxv1i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma -; CHECK-NEXT: vslide1down.vx v9, v9, a0 -; CHECK-NEXT: vslide1down.vx v9, v9, a1 -; CHECK-NEXT: vsetvli zero, 
a2, e64, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.th.vslide1down.nxv2i64.i64( - , - , - i64, - i32); - -define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 -; CHECK-NEXT: vslide1down.vx v8, v8, a1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.nxv2i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1down.mask.nxv2i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, ma -; CHECK-NEXT: vslide1down.vx v10, v10, a0 -; CHECK-NEXT: vslide1down.vx v10, v10, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.th.vslide1down.nxv4i64.i64( - , - , - i64, - i32); - -define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 -; CHECK-NEXT: vslide1down.vx v8, v8, a1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.nxv4i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1down.mask.nxv4i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma -; CHECK-NEXT: vslide1down.vx v12, v12, a0 -; CHECK-NEXT: vslide1down.vx v12, v12, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.th.vslide1down.nxv8i64.i64( - , - , - i64, - i32); - -define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: vslide1down.vx v8, v8, a0 -; CHECK-NEXT: vslide1down.vx v8, v8, a1 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.nxv8i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1down.mask.nxv8i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 
%2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vslide1down.vx v16, v16, a0 -; CHECK-NEXT: vslide1down.vx v16, v16, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1down.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vslide1up-rv32.ll index 9c93243dd85e5a..6bdda43b7dab15 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vslide1up-rv32.ll @@ -11,9 +11,9 @@ declare @llvm.riscv.th.vslide1up.nxv8i8.i8( define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma -; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vslide1up.vx v9, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv8i8.i8( @@ -35,8 +35,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv8i8.i8( define @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e8, m1, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv8i8.i8( @@ -58,9 +58,9 @@ declare @llvm.riscv.th.vslide1up.nxv16i8.i8( define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vslide1up.vx v10, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv16i8.i8( @@ -82,8 +82,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv16i8.i8( define @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e8, m2, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv16i8.i8( @@ -105,9 +105,9 @@ declare @llvm.riscv.th.vslide1up.nxv32i8.i8( define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma -; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vslide1up.vx v12, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv32i8.i8( @@ -129,8 +129,8 @@ declare 
@llvm.riscv.th.vslide1up.mask.nxv32i8.i8( define @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e8, m4, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv32i8.i8( @@ -152,9 +152,9 @@ declare @llvm.riscv.th.vslide1up.nxv64i8.i8( define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma -; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vslide1up.vx v16, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv64i8.i8( @@ -176,8 +176,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv64i8.i8( define @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e8, m8, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv64i8.i8( @@ -199,9 +199,9 @@ declare @llvm.riscv.th.vslide1up.nxv4i16.i16( define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma -; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vslide1up.vx v9, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv4i16.i16( @@ -223,8 +223,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv4i16.i16( define @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e16, m1, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv4i16.i16( @@ -246,9 +246,9 @@ declare @llvm.riscv.th.vslide1up.nxv8i16.i16( define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma -; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vslide1up.vx v10, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv8i16.i16( @@ -270,8 +270,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv8i16.i16( define @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t +; 
CHECK-NEXT: th.vsetvli zero, a1, e16, m2, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv8i16.i16( @@ -293,9 +293,9 @@ declare @llvm.riscv.th.vslide1up.nxv16i16.i16( define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma -; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vslide1up.vx v12, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv16i16.i16( @@ -317,8 +317,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv16i16.i16( define @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e16, m4, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv16i16.i16( @@ -340,9 +340,9 @@ declare @llvm.riscv.th.vslide1up.nxv32i16.i16( define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma -; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vslide1up.vx v16, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv32i16.i16( @@ -364,8 +364,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv32i16.i16( define @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e16, m8, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv32i16.i16( @@ -387,9 +387,9 @@ declare @llvm.riscv.th.vslide1up.nxv2i32.i32( define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vslide1up.vx v9, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv2i32.i32( @@ -411,8 +411,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv2i32.i32( define @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v9, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m1, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v9, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv2i32.i32( @@ -434,9 +434,9 @@ declare @llvm.riscv.th.vslide1up.nxv4i32.i32( define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( %0, 
i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma -; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vslide1up.vx v10, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv4i32.i32( @@ -458,8 +458,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv4i32.i32( define @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v10, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m2, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v10, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv4i32.i32( @@ -481,9 +481,9 @@ declare @llvm.riscv.th.vslide1up.nxv8i32.i32( define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma -; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vslide1up.vx v12, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv8i32.i32( @@ -505,8 +505,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv8i32.i32( define @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v12, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m4, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v12, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv8i32.i32( @@ -528,9 +528,9 @@ declare @llvm.riscv.th.vslide1up.nxv16i32.i32( define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma -; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vslide1up.vx v16, v8, a0 +; CHECK-NEXT: th.vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.nxv16i32.i32( @@ -552,8 +552,8 @@ declare @llvm.riscv.th.vslide1up.mask.nxv16i32.i32( define @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu -; CHECK-NEXT: vslide1up.vx v8, v16, a0, v0.t +; CHECK-NEXT: th.vsetvli zero, a1, e32, m8, d1 +; CHECK-NEXT: th.vslide1up.vx v8, v16, a0, v0.t ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.th.vslide1up.mask.nxv16i32.i32( @@ -571,213 +571,3 @@ declare @llvm.riscv.th.vslide1up.nxv1i64.i64( , i64, i32); - -define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vslide1up.vx v9, v8, a1 
-; CHECK-NEXT: vslide1up.vx v8, v9, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.nxv1i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1up.mask.nxv1i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma -; CHECK-NEXT: vslide1up.vx v10, v9, a1 -; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.mask.nxv1i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.th.vslide1up.nxv2i64.i64( - , - , - i64, - i32); - -define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma -; CHECK-NEXT: vslide1up.vx v10, v8, a1 -; CHECK-NEXT: vslide1up.vx v8, v10, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.nxv2i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1up.mask.nxv2i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, ma -; CHECK-NEXT: vslide1up.vx v12, v10, a1 -; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.mask.nxv2i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.th.vslide1up.nxv4i64.i64( - , - , - i64, - i32); - -define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma -; CHECK-NEXT: vslide1up.vx v12, v8, a1 -; CHECK-NEXT: vslide1up.vx v8, v12, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.nxv4i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1up.mask.nxv4i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma -; CHECK-NEXT: vslide1up.vx v16, v12, a1 -; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.mask.nxv4i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.th.vslide1up.nxv8i64.i64( - , - , - i64, - i32); - -define 
@intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, ma -; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma -; CHECK-NEXT: vslide1up.vx v16, v8, a1 -; CHECK-NEXT: vslide1up.vx v8, v16, a0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.nxv8i64.i64( - undef, - %0, - i64 %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.th.vslide1up.mask.nxv8i64.i64( - , - , - i64, - , - i32); - -define @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, ma -; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma -; CHECK-NEXT: vslide1up.vx v24, v16, a1 -; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma -; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.th.vslide1up.mask.nxv8i64.i64( - %0, - %1, - i64 %2, - %3, - i32 %4) - - ret %a -}
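For readers reassembling the post-patch state of vslide1down-rv32.ll, one representative updated test looks roughly like the sketch below. The CHECK lines are taken verbatim from the first hunk of this patch; the <vscale x 8 x i8> types, the undef passthru operand, and the full intrinsic signature are inferred from the intrinsic name and from the removed i64 variants shown above rather than copied from the patch, and the file's RUN lines are omitted.

; Sketch only: types and call signature reconstructed from the intrinsic name
; @llvm.riscv.th.vslide1down.nxv8i8.i8 (passthru, source vector, scalar, vl).
declare <vscale x 8 x i8> @llvm.riscv.th.vslide1down.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  i32);

define <vscale x 8 x i8> @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    th.vsetvli zero, a1, e8, m1, d1
; CHECK-NEXT:    th.vslide1down.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  ; Pass undef as the passthru operand, matching the removed i64 tests above.
  %a = call <vscale x 8 x i8> @llvm.riscv.th.vslide1down.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}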