From 33dac5626e63cfb62e6e18d0291886e371d5c9b1 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Tue, 19 Sep 2023 13:54:09 +0100
Subject: [PATCH 1/3] [RISCV] Add tests for strided VP loads with unit stride. NFC

---
 .../RISCV/rvv/fixed-vectors-strided-vpload.ll | 110 +++++++++++++---
 llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll | 117 +++++++++++++++---
 2 files changed, 191 insertions(+), 36 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 96100d2b62e41..1d913220c6ad2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -96,6 +96,17 @@ define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m,
   ret <8 x i8> %load
 }
 
+define <8 x i8> @strided_vpload_v8i8_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
+  ret <8 x i8> %load
+}
+
 declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -132,6 +143,17 @@ define <8 x i16> @strided_vpload_v8i16(ptr %ptr, i32 signext %stride, <8 x i1> %
   ret <8 x i16> %load
 }
 
+define <8 x i16> @strided_vpload_v8i16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v8i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %load
+}
+
 define <8 x i16> @strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v8i16_allones_mask:
 ; CHECK:       # %bb.0:
@@ -168,6 +190,17 @@ define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %
   ret <4 x i32> %load
 }
 
+define <4 x i32> @strided_vpload_v4i32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v4i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %load
+}
+
 declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x i32> @strided_vpload_v8i32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -204,6 +237,17 @@ define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %
   ret <2 x i64> %load
 }
 
+define <2 x i64> @strided_vpload_v2i64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v2i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %load
+}
+
 declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x i64> @strided_vpload_v4i64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -288,6 +332,17 @@ define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1>
   ret <8 x half> %load
 }
 
+define <8 x half> @strided_vpload_v8f16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v8f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %load
+}
+
 declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -312,6 +367,17 @@ define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1>
   ret <4 x float> %load
 }
 
+define <4 x float> @strided_vpload_v4f32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v4f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %load
+}
+
 declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x float> @strided_vpload_v8f32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -348,6 +414,18 @@ define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1
   ret <2 x double> %load
 }
 
+define <2 x double> @strided_vpload_v2f64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v2f64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret <2 x double> %load
+}
+
+
 declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x double> @strided_vpload_v4f64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -416,10 +494,10 @@ define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    vmv1r.v v9, v0
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB33_2
+; CHECK-NEXT:    bltu a2, a4, .LBB40_2
 ; CHECK-NEXT:    # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB33_2:
+; CHECK-NEXT:  .LBB40_2:
 ; CHECK-NEXT:    mul a4, a3, a1
 ; CHECK-NEXT:    add a4, a0, a4
 ; CHECK-NEXT:    addi a5, a2, -16
@@ -444,10 +522,10 @@ define <32 x double> @strided_vpload_v32f64_allones_mask(ptr %ptr, i32 signext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB34_2
+; CHECK-NEXT:    bltu a2, a4, .LBB41_2
 ; CHECK-NEXT:    # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB34_2:
+; CHECK-NEXT:  .LBB41_2:
 ; CHECK-NEXT:    mul a4, a3, a1
 ; CHECK-NEXT:    add a4, a0, a4
 ; CHECK-NEXT:    addi a5, a2, -16
@@ -474,10 +552,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV32-NEXT:    li a5, 32
 ; CHECK-RV32-NEXT:    vmv1r.v v8, v0
 ; CHECK-RV32-NEXT:    mv a3, a4
-; CHECK-RV32-NEXT:    bltu a4, a5, .LBB35_2
+; CHECK-RV32-NEXT:    bltu a4, a5, .LBB42_2
 ; CHECK-RV32-NEXT:    # %bb.1:
 ; CHECK-RV32-NEXT:    li a3, 32
-; CHECK-RV32-NEXT:  .LBB35_2:
+; CHECK-RV32-NEXT:  .LBB42_2:
 ; CHECK-RV32-NEXT:    mul a5, a3, a2
 ; CHECK-RV32-NEXT:    addi a6, a4, -32
 ; CHECK-RV32-NEXT:    sltu a4, a4, a6
@@ -485,10 +563,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV32-NEXT:    and a6, a4, a6
 ; CHECK-RV32-NEXT:    li a4, 16
 ; CHECK-RV32-NEXT:    add a5, a1, a5
-; CHECK-RV32-NEXT:    bltu a6, a4, .LBB35_4
+; CHECK-RV32-NEXT:    bltu a6, a4, .LBB42_4
 ; CHECK-RV32-NEXT:    # %bb.3:
 ; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:  .LBB35_4:
+; CHECK-RV32-NEXT:  .LBB42_4:
 ; CHECK-RV32-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; CHECK-RV32-NEXT:    vslidedown.vi v0, v8, 4
 ; CHECK-RV32-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
@@ -497,10 +575,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV32-NEXT:    sltu a6, a3, a5
 ; CHECK-RV32-NEXT:    addi a6, a6, -1
 ; CHECK-RV32-NEXT:    and a5, a6, a5
-; CHECK-RV32-NEXT:    bltu a3, a4, .LBB35_6
+; CHECK-RV32-NEXT:    bltu a3, a4, .LBB42_6
 ; CHECK-RV32-NEXT:    # %bb.5:
 ; CHECK-RV32-NEXT:    li a3, 16
-; CHECK-RV32-NEXT:  .LBB35_6:
+; CHECK-RV32-NEXT:  .LBB42_6:
 ; CHECK-RV32-NEXT:    mul a4, a3, a2
 ; CHECK-RV32-NEXT:    add a4, a1, a4
 ; CHECK-RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -524,10 +602,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV64-NEXT:    li a5, 32
 ; CHECK-RV64-NEXT:    vmv1r.v v8, v0
 ; CHECK-RV64-NEXT:    mv a4, a3
-; CHECK-RV64-NEXT:    bltu a3, a5, .LBB35_2
+; CHECK-RV64-NEXT:    bltu a3, a5, .LBB42_2
 ; CHECK-RV64-NEXT:    # %bb.1:
 ; CHECK-RV64-NEXT:    li a4, 32
-; CHECK-RV64-NEXT:  .LBB35_2:
+; CHECK-RV64-NEXT:  .LBB42_2:
 ; CHECK-RV64-NEXT:    mul a5, a4, a2
 ; CHECK-RV64-NEXT:    addi a6, a3, -32
 ; CHECK-RV64-NEXT:    sltu a3, a3, a6
@@ -535,10 +613,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV64-NEXT:    and a6, a3, a6
 ; CHECK-RV64-NEXT:    li a3, 16
 ; CHECK-RV64-NEXT:    add a5, a1, a5
-; CHECK-RV64-NEXT:    bltu a6, a3, .LBB35_4
+; CHECK-RV64-NEXT:    bltu a6, a3, .LBB42_4
 ; CHECK-RV64-NEXT:    # %bb.3:
 ; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:  .LBB35_4:
+; CHECK-RV64-NEXT:  .LBB42_4:
 ; CHECK-RV64-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; CHECK-RV64-NEXT:    vslidedown.vi v0, v8, 4
 ; CHECK-RV64-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
@@ -547,10 +625,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV64-NEXT:    sltu a6, a4, a5
 ; CHECK-RV64-NEXT:    addi a6, a6, -1
 ; CHECK-RV64-NEXT:    and a5, a6, a5
-; CHECK-RV64-NEXT:    bltu a4, a3, .LBB35_6
+; CHECK-RV64-NEXT:    bltu a4, a3, .LBB42_6
 ; CHECK-RV64-NEXT:    # %bb.5:
 ; CHECK-RV64-NEXT:    li a4, 16
-; CHECK-RV64-NEXT:  .LBB35_6:
+; CHECK-RV64-NEXT:  .LBB42_6:
 ; CHECK-RV64-NEXT:    mul a3, a4, a2
 ; CHECK-RV64-NEXT:    add a3, a1, a3
 ; CHECK-RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index d8431ad7662d9..d06e93ca33027 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -126,6 +126,17 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <
   ret <vscale x 8 x i8> %load
 }
 
+define <vscale x 8 x i8> @strided_vpload_nxv8i8_unit_stride(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
+
 define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv8i8_allones_mask:
 ; CHECK:       # %bb.0:
@@ -186,6 +197,17 @@ define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride,
   ret <vscale x 4 x i16> %load
 }
 
+define <vscale x 4 x i16> @strided_vpload_nxv4i16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv4i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x i16> %load
+}
+
 declare <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i16> @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -222,6 +244,17 @@ define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride,
   ret <vscale x 2 x i32> %load
 }
 
+define <vscale x 2 x i32> @strided_vpload_nxv2i32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv2i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %load
+}
+
 declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i32> @strided_vpload_nxv4i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
@@ -270,6 +303,17 @@ define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride,
   ret <vscale x 1 x i64> %load
 }
 
+define <vscale x 1 x i64> @strided_vpload_nxv1i64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv1i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
+  ret <vscale x 1 x i64> %load
+}
+
 define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv1i64_allones_mask:
 ; CHECK:       # %bb.0:
@@ -366,6 +410,17 @@ define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride
   ret <vscale x 4 x half> %load
 }
 
+define <vscale x 4 x half> @strided_vpload_nxv4f16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv4f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
+  ret <vscale x 4 x half> %load
+}
+
 declare <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x half> @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
@@ -402,6 +457,17 @@ define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %strid
   ret <vscale x 2 x float> %load
 }
 
+define <vscale x 2 x float> @strided_vpload_nxv2f32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv2f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x float> %load
+}
+
 declare <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x float> @strided_vpload_nxv4f32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1>
%m, i32 zeroext %evl) { @@ -450,6 +516,17 @@ define @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stri ret %load } +define @strided_vpload_nxv1f64_unit_stride(ptr %ptr, %m, i32 zeroext %evl) { +; CHECK-LABEL: strided_vpload_nxv1f64_unit_stride: +; CHECK: # %bb.0: +; CHECK-NEXT: li a2, 8 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a2, v0.t +; CHECK-NEXT: ret + %load = call @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 8, %m, i32 %evl) + ret %load +} + declare @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr, i32, , i32) define @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stride, %m, i32 zeroext %evl) { @@ -533,10 +610,10 @@ define @strided_load_nxv16f64(ptr %ptr, i64 %stride, @strided_load_nxv16f64(ptr %ptr, i64 %stride, @strided_load_nxv16f64_allones_mask(ptr %ptr, i64 ; CHECK-RV32-NEXT: sltu a5, a3, a2 ; CHECK-RV32-NEXT: addi a5, a5, -1 ; CHECK-RV32-NEXT: and a2, a5, a2 -; CHECK-RV32-NEXT: bltu a3, a4, .LBB43_2 +; CHECK-RV32-NEXT: bltu a3, a4, .LBB50_2 ; CHECK-RV32-NEXT: # %bb.1: ; CHECK-RV32-NEXT: mv a3, a4 -; CHECK-RV32-NEXT: .LBB43_2: +; CHECK-RV32-NEXT: .LBB50_2: ; CHECK-RV32-NEXT: mul a4, a3, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 ; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma @@ -605,10 +682,10 @@ define @strided_load_nxv16f64_allones_mask(ptr %ptr, i64 ; CHECK-RV64-NEXT: sltu a5, a2, a3 ; CHECK-RV64-NEXT: addi a5, a5, -1 ; CHECK-RV64-NEXT: and a3, a5, a3 -; CHECK-RV64-NEXT: bltu a2, a4, .LBB43_2 +; CHECK-RV64-NEXT: bltu a2, a4, .LBB50_2 ; CHECK-RV64-NEXT: # %bb.1: ; CHECK-RV64-NEXT: mv a2, a4 -; CHECK-RV64-NEXT: .LBB43_2: +; CHECK-RV64-NEXT: .LBB50_2: ; CHECK-RV64-NEXT: mul a4, a2, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma @@ -635,19 +712,19 @@ define @strided_load_nxv17f64(ptr %ptr, i64 %stride, @strided_load_nxv17f64(ptr %ptr, i64 %stride, @strided_load_nxv17f64(ptr %ptr, i64 %stride, @strided_load_nxv17f64(ptr %ptr, i64 %stride, Date: Tue, 19 Sep 2023 13:04:17 +0100 Subject: [PATCH 2/3] [DAGCombiner] Combine vp.strided.load with unit stride to vp.load This is the VP equivalent of #65674. We already combine MGATHER loads with unit stride to MLOAD, so this extends it for EXPERIMENTAL_VP_STRIDED_LOAD. --- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 21 +++++++++++++++++++ .../RISCV/rvv/fixed-vectors-strided-vpload.ll | 21 +++++++------------ llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll | 21 +++++++------------ 3 files changed, 35 insertions(+), 28 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 484a6231b7f65..df69dbb16042f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -539,6 +539,7 @@ namespace { SDValue visitMSCATTER(SDNode *N); SDValue visitVPGATHER(SDNode *N); SDValue visitVPSCATTER(SDNode *N); + SDValue visitVP_STRIDED_LOAD(SDNode *N); SDValue visitFP_TO_FP16(SDNode *N); SDValue visitFP16_TO_FP(SDNode *N); SDValue visitFP_TO_BF16(SDNode *N); @@ -11959,6 +11960,22 @@ SDValue DAGCombiner::visitMLOAD(SDNode *N) { return SDValue(); } +SDValue DAGCombiner::visitVP_STRIDED_LOAD(SDNode *N) { + auto *SLD = cast(N); + EVT EltVT = SLD->getValueType(0).getVectorElementType(); + // Combine strided loads with unit-stride to a regular load. 
+  if (auto *CStride = dyn_cast<ConstantSDNode>(SLD->getStride());
+      CStride && CStride->getZExtValue() == EltVT.getStoreSize()) {
+    SDValue NewLd = DAG.getLoadVP(
+        SLD->getAddressingMode(), SLD->getExtensionType(), SLD->getValueType(0),
+        SDLoc(N), SLD->getChain(), SLD->getBasePtr(), SLD->getOffset(),
+        SLD->getMask(), SLD->getVectorLength(), SLD->getMemoryVT(),
+        SLD->getMemOperand(), SLD->isExpandingLoad());
+    return CombineTo(N, NewLd, NewLd.getValue(1));
+  }
+  return SDValue();
+}
+
 /// A vector select of 2 constant vectors can be simplified to math/logic to
 /// avoid a variable select instruction and possibly avoid constant loads.
 SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
@@ -25976,6 +25993,10 @@ SDValue DAGCombiner::visitVPOp(SDNode *N) {
     if (SDValue SD = visitVPSCATTER(N))
       return SD;
 
+  if (N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD)
+    if (SDValue SD = visitVP_STRIDED_LOAD(N))
+      return SD;
+
   // VP operations in which all vector elements are disabled - either by
   // determining that the mask is all false or that the EVL is 0 - can be
   // eliminated.
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 1d913220c6ad2..2ae031798f5bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -99,9 +99,8 @@ define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m,
 define <8 x i8> @strided_vpload_v8i8_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v8i8_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %load
@@ -146,9 +145,8 @@ define <8 x i16> @strided_vpload_v8i16(ptr %ptr, i32 signext %stride, <8 x i1> %
 define <8 x i16> @strided_vpload_v8i16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v8i16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %load
@@ -193,9 +191,8 @@ define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %
 define <4 x i32> @strided_vpload_v4i32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v4i32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %load
@@ -240,9 +237,8 @@ define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %
 define <2 x i64> @strided_vpload_v2i64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v2i64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %load
@@ -335,9 +331,8 @@ define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1>
 define <8 x half> @strided_vpload_v8f16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v8f16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
   ret <8 x half> %load
@@ -370,9 +365,8 @@ define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1>
 define <4 x float> @strided_vpload_v4f32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v4f32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
   ret <4 x float> %load
@@ -417,9 +411,8 @@ define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1
 define <2 x double> @strided_vpload_v2f64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v2f64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
   ret <2 x double> %load
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index d06e93ca33027..47074d612bb64 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -129,9 +129,8 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <
 define <vscale x 8 x i8> @strided_vpload_nxv8i8_unit_stride(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv8i8_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %load
@@ -200,9 +199,8 @@ define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride,
 define <vscale x 4 x i16> @strided_vpload_nxv4i16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv4i16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %load
@@ -247,9 +245,8 @@ define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride,
 define <vscale x 2 x i32> @strided_vpload_nxv2i32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv2i32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %load
@@ -306,9 +303,8 @@ define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride,
 define <vscale x 1 x i64> @strided_vpload_nxv1i64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv1i64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %load
@@ -413,9 +409,8 @@ define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride
 define <vscale x 4 x half> @strided_vpload_nxv4f16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv4f16_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vlse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %load
@@ -460,9 +455,8 @@ define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %strid
 define <vscale x 2 x float> @strided_vpload_nxv2f32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv2f32_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %load
@@ -519,9 +513,8 @@ define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stri
 define <vscale x 1 x double> @strided_vpload_nxv1f64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv1f64_unit_stride:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a2, 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vlse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
   %load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %load

From 414af88efa286d0972f3496e7def29451abe7bd9 Mon Sep 17 00:00:00 2001
From: Luke Lau
Date: Tue, 19 Sep 2023 16:38:44 +0100
Subject: [PATCH 3/3] Fix comment

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index df69dbb16042f..20a89f24603d0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11963,7 +11963,7 @@ SDValue DAGCombiner::visitMLOAD(SDNode *N) {
 SDValue DAGCombiner::visitVP_STRIDED_LOAD(SDNode *N) {
   auto *SLD = cast<VPStridedLoadSDNode>(N);
   EVT EltVT = SLD->getValueType(0).getVectorElementType();
-  // Combine strided loads with unit-stride to a regular load.
+  // Combine strided loads with unit-stride to a regular VP load.
   if (auto *CStride = dyn_cast<ConstantSDNode>(SLD->getStride());
       CStride && CStride->getZExtValue() == EltVT.getStoreSize()) {
     SDValue NewLd = DAG.getLoadVP(
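
For reference, and not as part of the patches themselves: the combine rests on a simple equivalence. A vp.strided.load whose stride equals the element's store size in bytes reads consecutive elements, so it touches exactly the memory of a plain vp.load with the same pointer, mask, and EVL. The sketch below states that equivalence in LLVM IR; the function names are illustrative, while the intrinsic signatures match those used in the tests above.

declare <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr, i32, <4 x i1>, i32)
declare <4 x i32> @llvm.vp.load.v4i32.p0(ptr, <4 x i1>, i32)

; A stride of 4 bytes equals the store size of i32, so the elements are
; consecutive; before the combine this selected to vlse32.v ...
define <4 x i32> @unit_stride_example(ptr %p, <4 x i1> %m, i32 zeroext %evl) {
  %v = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %p, i32 4, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

; ... and it is equivalent to this plain VP load, which selects to vle32.v.
define <4 x i32> @plain_vp_load_example(ptr %p, <4 x i1> %m, i32 zeroext %evl) {
  %v = call <4 x i32> @llvm.vp.load.v4i32.p0(ptr %p, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}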