From dbb65dd330cc1696d7ca3dedc7aa9fa12c55a075 Mon Sep 17 00:00:00 2001
From: Paul Walker
Date: Wed, 14 Feb 2024 14:28:23 +0000
Subject: [PATCH] [LLVM][tests/CodeGen/RISCV] Convert instances of
 ConstantExpr-based splats to use splat().

This is mostly NFC, but some output does change due to splat()
consistently inserting into poison rather than undef and using i64 as
the index type for the inserts.
---
 llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll   | 16 ++--
 .../CodeGen/RISCV/rvv/mscatter-combine.ll     |  6 +-
 llvm/test/CodeGen/RISCV/rvv/pr61561.ll        | 10 +--
 llvm/test/CodeGen/RISCV/rvv/pr63459.ll        |  2 +-
 llvm/test/CodeGen/RISCV/rvv/stepvector.ll     | 10 +--
 .../CodeGen/RISCV/rvv/strided-load-store.ll   | 38 ++++----
 .../RISCV/rvv/undef-earlyclobber-chain.ll     | 12 +--
 llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll       | 88 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll      |  6 +-
 .../test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll     | 26 +++---
 .../test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll     | 26 +++---
 llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll    |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll      | 10 +--
 llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll   |  4 +-
 llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll  |  4 +-
 llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll    | 88 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll     |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll  |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll       | 14 +--
 .../test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll     | 26 +++---
 llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll      | 12 +--
 .../test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll     | 26 +++---
 .../CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll    | 12 +--
 llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll       | 12 +--
 llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll   |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll       |  6 +-
 .../CodeGen/RISCV/rvv/vwsub-mask-sdnode.ll    | 10 +--
 llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll  |  6 +-
 llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll       | 14 +--
 34 files changed, 269 insertions(+), 269 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
index 3a73f1729deddf..c310274d685081 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll
@@ -606,7 +606,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ult_two(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmsleu.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp ult <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 2, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
+  %cmp = icmp ult <vscale x 16 x i32> %a, splat (i32 2)
   ret <vscale x 16 x i1> %cmp
 }
@@ -626,7 +626,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ugt_one(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmsgtu.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp ugt <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
+  %cmp = icmp ugt <vscale x 16 x i32> %a, splat (i32 1)
   ret <vscale x 16 x i1> %cmp
 }
@@ -646,7 +646,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_eq_one(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmseq.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp eq <vscale x 16 x i32> %a, shufflevector (<vscale x 16 x i32> insertelement (<vscale x 16 x i32> poison, i32 1, i64 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer)
+  %cmp = icmp eq <vscale x 16 x i32> %a, splat (i32 1)
   ret <vscale x 16 x i1> %cmp
 }
@@ -666,7 +666,7 @@ define <vscale x 16 x i1> @ctpop_nxv16i32_ne_one(<vscale x 16 x i32> %va) {
 ; CHECK-ZVBB-NEXT: vmsne.vi v0, v8, 1
 ; CHECK-ZVBB-NEXT: ret
   %a = call <vscale x 16 x i32> @llvm.ctpop.nxv16i32(<vscale x 16 x i32> %va)
-  %cmp = icmp
ne %a, shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) + %cmp = icmp ne %a, splat (i32 1) ret %cmp } @@ -1020,7 +1020,7 @@ define @ctpop_nxv8i64_ult_two( %va) { ; CHECK-ZVBB-NEXT: vmsleu.vi v0, v8, 1 ; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i64( %va) - %cmp = icmp ult %a, shufflevector ( insertelement ( poison, i64 2, i64 0), poison, zeroinitializer) + %cmp = icmp ult %a, splat (i64 2) ret %cmp } @@ -1040,7 +1040,7 @@ define @ctpop_nxv8i64_ugt_one( %va) { ; CHECK-ZVBB-NEXT: vmsgtu.vi v0, v8, 1 ; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i64( %va) - %cmp = icmp ugt %a, shufflevector ( insertelement ( poison, i64 1, i64 0), poison, zeroinitializer) + %cmp = icmp ugt %a, splat (i64 1) ret %cmp } @@ -1060,7 +1060,7 @@ define @ctpop_nxv8i64_eq_one( %va) { ; CHECK-ZVBB-NEXT: vmseq.vi v0, v8, 1 ; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i64( %va) - %cmp = icmp eq %a, shufflevector ( insertelement ( poison, i64 1, i64 0), poison, zeroinitializer) + %cmp = icmp eq %a, splat (i64 1) ret %cmp } @@ -1080,7 +1080,7 @@ define @ctpop_nxv8i64_ne_one( %va) { ; CHECK-ZVBB-NEXT: vmsne.vi v0, v8, 1 ; CHECK-ZVBB-NEXT: ret %a = call @llvm.ctpop.nxv8i64( %va) - %cmp = icmp ne %a, shufflevector ( insertelement ( poison, i64 1, i64 0), poison, zeroinitializer) + %cmp = icmp ne %a, splat (i64 1) ret %cmp } diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll index df944fada7964b..c26532d3559571 100644 --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll @@ -58,7 +58,7 @@ define void @strided_store_zero_start(i64 %n, ptr %p) { ; RV64-NEXT: ret %step = tail call @llvm.experimental.stepvector.nxv1i64() %gep = getelementptr inbounds %struct, ptr %p, %step, i32 6 - tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %gep, i32 8, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) + tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %gep, i32 8, splat (i1 true)) ret void } @@ -93,7 +93,7 @@ define void @strided_store_offset_start(i64 %n, ptr %p) { %.splat = shufflevector %.splatinsert, poison, zeroinitializer %add = add %step, %.splat %gep = getelementptr inbounds %struct, ptr %p, %add, i32 6 - tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %gep, i32 8, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) + tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %gep, i32 8, splat (i1 true)) ret void } @@ -118,7 +118,7 @@ define void @stride_one_store(i64 %n, ptr %p) { ; RV64-NEXT: ret %step = tail call @llvm.experimental.stepvector.nxv1i64() %gep = getelementptr inbounds i64, ptr %p, %step - tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %gep, i32 8, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) + tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %gep, i32 8, splat (i1 true)) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/pr61561.ll b/llvm/test/CodeGen/RISCV/rvv/pr61561.ll index f27edd36116657..c5fd6943e51bef 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pr61561.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pr61561.ll @@ -23,11 +23,11 @@ define @foo(ptr %p) { ; CHECK-NEXT: ret %i13 = load , ptr %p, align 2 %i14 = zext %i13 to - %i15 = shl nuw nsw %i14, shufflevector ( insertelement ( poison, i32 3, i64 0), poison, zeroinitializer) - %i16 = and %i15, shufflevector 
( insertelement ( poison, i32 248, i64 0), poison, zeroinitializer) - %i17 = mul nuw nsw %i16, shufflevector ( insertelement ( poison, i32 3735, i64 0), poison, zeroinitializer) - %i18 = add nuw nsw %i17, shufflevector ( insertelement ( poison, i32 16384, i64 0), poison, zeroinitializer) - %i21 = lshr %i18, shufflevector ( insertelement ( poison, i32 15, i64 0), poison, zeroinitializer) + %i15 = shl nuw nsw %i14, splat (i32 3) + %i16 = and %i15, splat (i32 248) + %i17 = mul nuw nsw %i16, splat (i32 3735) + %i18 = add nuw nsw %i17, splat (i32 16384) + %i21 = lshr %i18, splat (i32 15) %i22 = trunc %i21 to ret %i22 } diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63459.ll b/llvm/test/CodeGen/RISCV/rvv/pr63459.ll index c871e2992a5efe..5ef8e18bb2641e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/pr63459.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pr63459.ll @@ -14,7 +14,7 @@ define void @snork(ptr %arg, %arg1) { ; CHECK-NEXT: ret bb: %getelementptr = getelementptr inbounds , ptr %arg, %arg1 - tail call void @llvm.vp.scatter.nxv2i32.nxv2p0( shufflevector ( insertelement ( poison, i32 1, i32 0), poison, zeroinitializer), align 4 %getelementptr, shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer), i32 4) + tail call void @llvm.vp.scatter.nxv2i32.nxv2p0( splat (i32 1), align 4 %getelementptr, splat (i1 true), i32 4) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll index 2d65c9d178b789..8f02ca65358102 100644 --- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -743,7 +743,7 @@ define @hi_bits_known_zero() vscale_range(2, 4) { ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret %step = call @llvm.experimental.stepvector.nxv2i64() - %and = and %step, shufflevector( insertelement( poison, i64 u0xfffffffffffffff8, i32 0), poison, zeroinitializer) + %and = and %step, splat (i64 u0xfffffffffffffff8) ret %and } @@ -758,8 +758,8 @@ define @hi_bits_known_zero_overflow() vscale_range(2, 4) { ; CHECK-NEXT: vand.vi v8, v8, -8 ; CHECK-NEXT: ret %step = call @llvm.experimental.stepvector.nxv2i64() - %step.mul = mul %step, shufflevector( insertelement( poison, i64 u0xffffffffffffffff, i32 0), poison, zeroinitializer) - %and = and %step.mul, shufflevector( insertelement( poison, i64 u0xfffffffffffffff8, i32 0), poison, zeroinitializer) + %step.mul = mul %step, splat (i64 u0xffffffffffffffff) + %and = and %step.mul, splat (i64 u0xfffffffffffffff8) ret %and } @@ -771,7 +771,7 @@ define @lo_bits_known_zero() { ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret %step = call @llvm.experimental.stepvector.nxv2i64() - %step.mul = mul %step, shufflevector( insertelement( poison, i64 8, i32 0), poison, zeroinitializer) - %and = and %step.mul, shufflevector( insertelement( poison, i64 7, i32 0), poison, zeroinitializer) + %step.mul = mul %step, splat (i64 8) + %and = and %step.mul, splat (i64 7) ret %and } diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll index 54e5d39e248544..6b584cfb22a52e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll @@ -16,7 +16,7 @@ define @gather(ptr %a, i32 %len) { ; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[ACCUM:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr 
[[A:%.*]], i64 [[VEC_IND_SCALAR]], i32 3 -; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.riscv.masked.strided.load.nxv1i64.p0.i64( undef, ptr [[TMP1]], i64 16, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) +; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.riscv.masked.strided.load.nxv1i64.p0.i64( undef, ptr [[TMP1]], i64 16, shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer)) ; CHECK-NEXT: [[ACCUM_NEXT]] = add [[ACCUM]], [[GATHER]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP0]] ; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], [[TMP0]] @@ -38,7 +38,7 @@ vector.body: ; preds = %vector.body, %vecto %vec.ind = phi [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ] %accum = phi [ zeroinitializer, %vector.ph ], [ %accum.next, %vector.body ] %2 = getelementptr inbounds %struct.foo, ptr %a, %vec.ind, i32 3 - %gather = call @llvm.masked.gather.nxv1i64.nxv1p0( %2, i32 8, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), undef) + %gather = call @llvm.masked.gather.nxv1i64.nxv1p0( %2, i32 8, splat (i1 true), undef) %accum.next = add %accum, %gather %index.next = add nuw i64 %index, %0 %vec.ind.next = add %vec.ind, %.splat @@ -59,7 +59,7 @@ define @gather_disjoint_or(ptr %a, i64 %len) { ; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 1, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[ACCUM:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[A:%.*]], i64 [[VEC_IND_SCALAR]] -; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.riscv.masked.strided.load.nxv1i64.p0.i64( poison, ptr [[TMP0]], i64 16, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) +; CHECK-NEXT: [[GATHER:%.*]] = call @llvm.riscv.masked.strided.load.nxv1i64.p0.i64( poison, ptr [[TMP0]], i64 16, shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer)) ; CHECK-NEXT: [[ACCUM_NEXT]] = add [[ACCUM]], [[GATHER]] ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[VSCALE]] ; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], 2 @@ -71,7 +71,7 @@ define @gather_disjoint_or(ptr %a, i64 %len) { vector.ph: %vscale = call i64 @llvm.vscale.i64() %step = tail call @llvm.experimental.stepvector.nxv1i64() - %step.mul2 = shl %step, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer) + %step.mul2 = shl %step, splat (i64 1) br label %vector.body vector.body: ; preds = %vector.body, %vector.ph @@ -80,19 +80,19 @@ vector.body: ; preds = %vector.body, %vecto %accum = phi [ zeroinitializer, %vector.ph ], [ %accum.next, %vector.body ] - %vec.ind.or = or disjoint %vec.ind, shufflevector( insertelement( poison, i64 1, i64 0), poison, zeroinitializer) + %vec.ind.or = or disjoint %vec.ind, splat (i64 1) %gep = getelementptr i64, ptr %a, %vec.ind.or %gather = call @llvm.masked.gather.nxv1i64.nxv1p0( %gep, i32 8, - shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), + splat (i1 true), poison ) %accum.next = add %accum, %gather %index.next = add nuw i64 %index, %vscale - %vec.ind.next = add %vec.ind, shufflevector( insertelement( poison, i64 2, i64 0), poison, zeroinitializer) + %vec.ind.next = add %vec.ind, splat (i64 2) %exit = icmp ne i64 %index.next, %len br i1 %exit, label %for.cond.cleanup, label %vector.body @@ -111,7 +111,7 @@ define void @scatter(ptr %a, i32 %len) { ; CHECK-NEXT: [[INDEX:%.*]] = 
phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_IND_SCALAR:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT_SCALAR:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_FOO:%.*]], ptr [[A:%.*]], i64 [[VEC_IND_SCALAR]], i32 3 -; CHECK-NEXT: call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64( zeroinitializer, ptr [[TMP1]], i64 16, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) +; CHECK-NEXT: call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64( zeroinitializer, ptr [[TMP1]], i64 16, shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer)) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP0]] ; CHECK-NEXT: [[VEC_IND_NEXT_SCALAR]] = add i64 [[VEC_IND_SCALAR]], [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i64 [[INDEX_NEXT]], [[WIDE_TRIP_COUNT]] @@ -131,7 +131,7 @@ vector.body: ; preds = %vector.body, %vecto %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi [ %1, %vector.ph ], [ %vec.ind.next, %vector.body ] %2 = getelementptr inbounds %struct.foo, ptr %a, %vec.ind, i32 3 - tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %2, i32 8, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)) + tail call void @llvm.masked.scatter.nxv1i64.nxv1p0( zeroinitializer, %2, i32 8, splat (i1 true)) %index.next = add nuw i64 %index, %0 %vec.ind.next = add %vec.ind, %.splat %3 = icmp ne i64 %index.next, %wide.trip.count @@ -155,7 +155,7 @@ define @gather_loopless(ptr %p, i64 %stride) { %x = call @llvm.masked.gather.nxv1i64.nxv1p0( %ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 1, i64 0), poison, zeroinitializer), + splat (i1 1), poison ) ret %x @@ -175,7 +175,7 @@ define @straightline_offset_add(ptr %p, i64 %offset) { %x = call @llvm.masked.gather.nxv1i64.nxv1p0( %ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 1, i64 0), poison, zeroinitializer), + splat (i1 1), poison ) ret %x @@ -188,13 +188,13 @@ define @straightline_offset_disjoint_or(ptr %p, i64 %offset) ; CHECK-NEXT: ret [[X]] ; %step = call @llvm.experimental.stepvector.nxv1i64() - %step.shl = shl %step, shufflevector ( insertelement ( poison, i64 1, i32 0), poison, zeroinitializer) - %offsetv = or disjoint %step.shl, shufflevector ( insertelement ( poison, i64 1, i32 0), poison, zeroinitializer) + %step.shl = shl %step, splat (i64 1) + %offsetv = or disjoint %step.shl, splat (i64 1) %ptrs = getelementptr i32, ptr %p, %offsetv %x = call @llvm.masked.gather.nxv1i64.nxv1p0( %ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer), + splat (i1 true), poison ) ret %x @@ -213,7 +213,7 @@ define @straightline_offset_shl(ptr %p) { %x = call @llvm.masked.gather.nxv1i64.nxv1p0( %ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 1, i64 0), poison, zeroinitializer), + splat (i1 1), poison ) ret %x @@ -237,7 +237,7 @@ define @neg_shl_is_not_commutative(ptr %p) { %x = call @llvm.masked.gather.nxv1i64.nxv1p0( %ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 1, i64 0), poison, zeroinitializer), + splat (i1 1), poison ) ret %x @@ -258,7 +258,7 @@ define @straightline_offset_shl_nonc(ptr %p, i64 %shift) { %x = call @llvm.masked.gather.nxv1i64.nxv1p0( %ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 1, i64 0), poison, zeroinitializer), + splat (i1 1), poison ) ret %x @@ -279,7 +279,7 @@ define void @scatter_loopless( %x, ptr %p, i64 %stride) { %x, 
%ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 1, i64 0), poison, zeroinitializer) + splat (i1 1) ) ret void } @@ -296,7 +296,7 @@ define void @constant_stride( %x, ptr %p, i64 %stride) { %x, %ptrs, i32 8, - shufflevector ( insertelement ( poison, i1 1, i64 0), poison, zeroinitializer) + splat (i1 1) ) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll index 0901c261af1aca..f41a3ec72aed7e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll +++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll @@ -79,18 +79,18 @@ start: Cond1: ; preds = %start %v15 = tail call @llvm.experimental.stepvector.nxv1i16() %v17 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %v15, i64 0) - %vs12.i.i.i = add %v15, shufflevector ( insertelement ( poison, i16 1, i32 0), poison, zeroinitializer) + %vs12.i.i.i = add %v15, splat (i16 1) %v18 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %vs12.i.i.i, i64 0) - %vs16.i.i.i = add %v15, shufflevector ( insertelement ( poison, i16 3, i32 0), poison, zeroinitializer) + %vs16.i.i.i = add %v15, splat (i16 3) %v20 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %vs16.i.i.i, i64 0) br label %UseSR Cond2: ; preds = %start %v15.2 = tail call @llvm.experimental.stepvector.nxv1i16() %v17.2 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %v15.2, i64 1) - %vs12.i.i.i.2 = add %v15.2, shufflevector ( insertelement ( poison, i16 1, i32 0), poison, zeroinitializer) + %vs12.i.i.i.2 = add %v15.2, splat (i16 1) %v18.2 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %vs12.i.i.i.2, i64 1) - %vs16.i.i.i.2 = add %v15.2, shufflevector ( insertelement ( poison, i16 3, i32 0), poison, zeroinitializer) + %vs16.i.i.i.2 = add %v15.2, splat (i16 3) %v20.2 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %vs16.i.i.i.2, i64 1) br label %UseSR @@ -132,9 +132,9 @@ define internal void @SubRegLivenessUndef() { loopIR.preheader.i.i: %v15 = tail call @llvm.experimental.stepvector.nxv1i16() %v17 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %v15, i64 0) - %vs12.i.i.i = add %v15, shufflevector ( insertelement ( poison, i16 1, i32 0), poison, zeroinitializer) + %vs12.i.i.i = add %v15, splat (i16 1) %v18 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %vs12.i.i.i, i64 0) - %vs16.i.i.i = add %v15, shufflevector ( insertelement ( poison, i16 3, i32 0), poison, zeroinitializer) + %vs16.i.i.i = add %v15, splat (i16 3) %v20 = tail call @llvm.vector.insert.nxv8i16.nxv1i16( poison, %vs16.i.i.i, i64 0) br label %loopIR3.i.i diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll index f076c3c621cdbd..95866543828fc7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll @@ -20,7 +20,7 @@ define @vandn_vv_vp_nxv1i8( %a, @llvm.vp.xor.nxv1i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i8( %not.a, %b, %mask, i32 %evl) ret %x } @@ -38,7 +38,7 @@ define @vandn_vv_vp_swapped_nxv1i8( %a, @llvm.vp.xor.nxv1i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i8( %b, %not.a, %mask, i32 %evl) ret %x } @@ -79,7 +79,7 @@ define @vandn_vv_vp_nxv2i8( %a, 
@llvm.vp.xor.nxv2i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i8( %not.a, %b, %mask, i32 %evl) ret %x } @@ -97,7 +97,7 @@ define @vandn_vv_vp_swapped_nxv2i8( %a, @llvm.vp.xor.nxv2i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i8( %b, %not.a, %mask, i32 %evl) ret %x } @@ -138,7 +138,7 @@ define @vandn_vv_vp_nxv4i8( %a, @llvm.vp.xor.nxv4i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i8( %not.a, %b, %mask, i32 %evl) ret %x } @@ -156,7 +156,7 @@ define @vandn_vv_vp_swapped_nxv4i8( %a, @llvm.vp.xor.nxv4i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i8( %b, %not.a, %mask, i32 %evl) ret %x } @@ -197,7 +197,7 @@ define @vandn_vv_vp_nxv8i8( %a, @llvm.vp.xor.nxv8i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i8( %not.a, %b, %mask, i32 %evl) ret %x } @@ -215,7 +215,7 @@ define @vandn_vv_vp_swapped_nxv8i8( %a, @llvm.vp.xor.nxv8i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i8( %b, %not.a, %mask, i32 %evl) ret %x } @@ -256,7 +256,7 @@ define @vandn_vv_vp_nxv16i8( %a, @llvm.vp.xor.nxv16i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv16i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv16i8( %not.a, %b, %mask, i32 %evl) ret %x } @@ -274,7 +274,7 @@ define @vandn_vv_vp_swapped_nxv16i8( %a, @llvm.vp.xor.nxv16i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv16i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv16i8( %b, %not.a, %mask, i32 %evl) ret %x } @@ -315,7 +315,7 @@ define @vandn_vv_vp_nxv32i8( %a, @llvm.vp.xor.nxv32i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv32i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv32i8( %not.a, %b, %mask, i32 %evl) ret %x } @@ -333,7 +333,7 @@ define @vandn_vv_vp_swapped_nxv32i8( %a, @llvm.vp.xor.nxv32i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv32i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv32i8( %b, %not.a, %mask, i32 %evl) ret %x } @@ -374,7 +374,7 @@ define @vandn_vv_vp_nxv64i8( %a, @llvm.vp.xor.nxv64i8( %a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv64i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv64i8( %not.a, %b, %mask, i32 %evl) ret %x } @@ -392,7 +392,7 @@ define @vandn_vv_vp_swapped_nxv64i8( %a, @llvm.vp.xor.nxv64i8( 
%a, shufflevector( insertelement( poison, i8 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv64i8( %a, splat (i8 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv64i8( %b, %not.a, %mask, i32 %evl) ret %x } @@ -433,7 +433,7 @@ define @vandn_vv_vp_nxv1i16( %a, @llvm.vp.xor.nxv1i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i16( %not.a, %b, %mask, i32 %evl) ret %x } @@ -451,7 +451,7 @@ define @vandn_vv_vp_swapped_nxv1i16( %a, @llvm.vp.xor.nxv1i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i16( %b, %not.a, %mask, i32 %evl) ret %x } @@ -492,7 +492,7 @@ define @vandn_vv_vp_nxv2i16( %a, @llvm.vp.xor.nxv2i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i16( %not.a, %b, %mask, i32 %evl) ret %x } @@ -510,7 +510,7 @@ define @vandn_vv_vp_swapped_nxv2i16( %a, @llvm.vp.xor.nxv2i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i16( %b, %not.a, %mask, i32 %evl) ret %x } @@ -551,7 +551,7 @@ define @vandn_vv_vp_nxv4i16( %a, @llvm.vp.xor.nxv4i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i16( %not.a, %b, %mask, i32 %evl) ret %x } @@ -569,7 +569,7 @@ define @vandn_vv_vp_swapped_nxv4i16( %a, @llvm.vp.xor.nxv4i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i16( %b, %not.a, %mask, i32 %evl) ret %x } @@ -610,7 +610,7 @@ define @vandn_vv_vp_nxv8i16( %a, @llvm.vp.xor.nxv8i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i16( %not.a, %b, %mask, i32 %evl) ret %x } @@ -628,7 +628,7 @@ define @vandn_vv_vp_swapped_nxv8i16( %a, @llvm.vp.xor.nxv8i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i16( %b, %not.a, %mask, i32 %evl) ret %x } @@ -669,7 +669,7 @@ define @vandn_vv_vp_nxv16i16( %a, @llvm.vp.xor.nxv16i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv16i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv16i16( %not.a, %b, %mask, i32 %evl) ret %x } @@ -687,7 +687,7 @@ define @vandn_vv_vp_swapped_nxv16i16( %a, ; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8, v0.t ; CHECK-ZVKB-NEXT: ret - %not.a = call @llvm.vp.xor.nxv16i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv16i16( %a, splat (i16 -1), 
%mask, i32 %evl) %x = call @llvm.vp.and.nxv16i16( %b, %not.a, %mask, i32 %evl) ret %x } @@ -728,7 +728,7 @@ define @vandn_vv_vp_nxv32i16( %a, @llvm.vp.xor.nxv32i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv32i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv32i16( %not.a, %b, %mask, i32 %evl) ret %x } @@ -746,7 +746,7 @@ define @vandn_vv_vp_swapped_nxv32i16( %a, ; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t ; CHECK-ZVKB-NEXT: ret - %not.a = call @llvm.vp.xor.nxv32i16( %a, shufflevector( insertelement( poison, i16 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv32i16( %a, splat (i16 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv32i16( %b, %not.a, %mask, i32 %evl) ret %x } @@ -787,7 +787,7 @@ define @vandn_vv_vp_nxv1i32( %a, @llvm.vp.xor.nxv1i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i32( %not.a, %b, %mask, i32 %evl) ret %x } @@ -805,7 +805,7 @@ define @vandn_vv_vp_swapped_nxv1i32( %a, @llvm.vp.xor.nxv1i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i32( %b, %not.a, %mask, i32 %evl) ret %x } @@ -846,7 +846,7 @@ define @vandn_vv_vp_nxv2i32( %a, @llvm.vp.xor.nxv2i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i32( %not.a, %b, %mask, i32 %evl) ret %x } @@ -864,7 +864,7 @@ define @vandn_vv_vp_swapped_nxv2i32( %a, @llvm.vp.xor.nxv2i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i32( %b, %not.a, %mask, i32 %evl) ret %x } @@ -905,7 +905,7 @@ define @vandn_vv_vp_nxv4i32( %a, @llvm.vp.xor.nxv4i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i32( %not.a, %b, %mask, i32 %evl) ret %x } @@ -923,7 +923,7 @@ define @vandn_vv_vp_swapped_nxv4i32( %a, @llvm.vp.xor.nxv4i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i32( %b, %not.a, %mask, i32 %evl) ret %x } @@ -964,7 +964,7 @@ define @vandn_vv_vp_nxv8i32( %a, @llvm.vp.xor.nxv8i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i32( %not.a, %b, %mask, i32 %evl) ret %x } @@ -982,7 +982,7 @@ define @vandn_vv_vp_swapped_nxv8i32( %a, @llvm.vp.xor.nxv8i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i32( %b, %not.a, %mask, i32 %evl) ret %x } @@ -1023,7 +1023,7 @@ define @vandn_vv_vp_nxv16i32( %a, 
@llvm.vp.xor.nxv16i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv16i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv16i32( %not.a, %b, %mask, i32 %evl) ret %x } @@ -1041,7 +1041,7 @@ define @vandn_vv_vp_swapped_nxv16i32( %a, ; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t ; CHECK-ZVKB-NEXT: ret - %not.a = call @llvm.vp.xor.nxv16i32( %a, shufflevector( insertelement( poison, i32 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv16i32( %a, splat (i32 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv16i32( %b, %not.a, %mask, i32 %evl) ret %x } @@ -1082,7 +1082,7 @@ define @vandn_vv_vp_nxv1i64( %a, @llvm.vp.xor.nxv1i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i64( %not.a, %b, %mask, i32 %evl) ret %x } @@ -1100,7 +1100,7 @@ define @vandn_vv_vp_swapped_nxv1i64( %a, @llvm.vp.xor.nxv1i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv1i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv1i64( %b, %not.a, %mask, i32 %evl) ret %x } @@ -1173,7 +1173,7 @@ define @vandn_vv_vp_nxv2i64( %a, @llvm.vp.xor.nxv2i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i64( %not.a, %b, %mask, i32 %evl) ret %x } @@ -1191,7 +1191,7 @@ define @vandn_vv_vp_swapped_nxv2i64( %a, @llvm.vp.xor.nxv2i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv2i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv2i64( %b, %not.a, %mask, i32 %evl) ret %x } @@ -1264,7 +1264,7 @@ define @vandn_vv_vp_nxv4i64( %a, @llvm.vp.xor.nxv4i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i64( %not.a, %b, %mask, i32 %evl) ret %x } @@ -1282,7 +1282,7 @@ define @vandn_vv_vp_swapped_nxv4i64( %a, @llvm.vp.xor.nxv4i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv4i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv4i64( %b, %not.a, %mask, i32 %evl) ret %x } @@ -1355,7 +1355,7 @@ define @vandn_vv_vp_nxv8i64( %a, @llvm.vp.xor.nxv8i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i64( %not.a, %b, %mask, i32 %evl) ret %x } @@ -1373,7 +1373,7 @@ define @vandn_vv_vp_swapped_nxv8i64( %a, @llvm.vp.xor.nxv8i64( %a, shufflevector( insertelement( poison, i64 -1, i32 0), poison, zeroinitializer), %mask, i32 %evl) + %not.a = call @llvm.vp.xor.nxv8i64( %a, splat (i64 -1), %mask, i32 %evl) %x = call @llvm.vp.and.nxv8i64( %b, %not.a, %mask, i32 %evl) ret %x } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll index 4440ea56ba9017..5cfa98916a2de0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll @@ -24,7 +24,7 @@ define @vfpext_nxv2f16_nxv2f32_unmasked( ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.fpext.nxv2f32.nxv2f16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.fpext.nxv2f32.nxv2f16( %a, splat (i1 true), i32 %vl) ret %v } @@ -50,7 +50,7 @@ define @vfpext_nxv2f16_nxv2f64_unmasked( @llvm.vp.fpext.nxv2f64.nxv2f16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.fpext.nxv2f64.nxv2f16( %a, splat (i1 true), i32 %vl) ret %v } @@ -74,7 +74,7 @@ define @vfpext_nxv2f32_nxv2f64_unmasked( @llvm.vp.fpext.nxv2f64.nxv2f32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.fpext.nxv2f64.nxv2f32( %a, splat (i1 true), i32 %vl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll index 9061c38975e283..e5048eaf9d0c23 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll @@ -42,7 +42,7 @@ define @vfptosi_nxv2i1_nxv2f16_unmasked( %v ; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9 ; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i1.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i1.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -66,7 +66,7 @@ define @vfptosi_nxv2i1_nxv2f32_unmasked( % ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i1.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i1.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -91,6 +91,6 @@ define @vfptosi_nxv2i1_nxv2f64_unmasked( ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i1.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i1.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll index 9e7d6f92d84e93..15c4bf255e6dce 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -67,7 +67,7 @@ define @vfptosi_nxv2i8_nxv2f16_unmasked( %v ; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i8.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i8.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -105,7 +105,7 @@ define @vfptosi_nxv2i16_nxv2f16_unmasked( ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i16.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i16.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -145,7 +145,7 @@ define @vfptosi_nxv2i32_nxv2f16_unmasked( ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9 ; ZVFHMIN-NEXT: ret - %v = call 
@llvm.vp.fptosi.nxv2i32.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i32.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -187,7 +187,7 @@ define @vfptosi_nxv2i64_nxv2f16_unmasked( ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; ZVFHMIN-NEXT: vfwcvt.rtz.x.f.v v8, v10 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i64.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i64.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -213,7 +213,7 @@ define @vfptosi_nxv2i8_nxv2f32_unmasked( % ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i8.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i8.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -237,7 +237,7 @@ define @vfptosi_nxv2i16_nxv2f32_unmasked( ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i16.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i16.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -259,7 +259,7 @@ define @vfptosi_nxv2i32_nxv2f32_unmasked( ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i32.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i32.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -283,7 +283,7 @@ define @vfptosi_nxv2i64_nxv2f32_unmasked( ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i64.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i64.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -313,7 +313,7 @@ define @vfptosi_nxv2i8_nxv2f64_unmasked( ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptosi.nxv2i8.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i8.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -339,7 +339,7 @@ define @vfptosi_nxv2i16_nxv2f64_unmasked( @llvm.vp.fptosi.nxv2i16.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i16.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -363,7 +363,7 @@ define @vfptosi_nxv2i32_nxv2f64_unmasked( @llvm.vp.fptosi.nxv2i32.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i32.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -385,7 +385,7 @@ define @vfptosi_nxv2i64_nxv2f64_unmasked( @llvm.vp.fptosi.nxv2i64.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptosi.nxv2i64.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -466,6 +466,6 @@ define @vfptosi_nxv32i32_nxv32f32_unmasked( @llvm.vp.fptosi.nxv32i32.nxv32f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 
%evl) + %v = call @llvm.vp.fptosi.nxv32i32.nxv32f32( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll index 6646171fcd15eb..4b609d07c1e7ae 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll @@ -42,7 +42,7 @@ define @vfptoui_nxv2i1_nxv2f16_unmasked( %v ; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9 ; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i1.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i1.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -66,7 +66,7 @@ define @vfptoui_nxv2i1_nxv2f32_unmasked( % ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i1.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i1.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -91,6 +91,6 @@ define @vfptoui_nxv2i1_nxv2f64_unmasked( ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i1.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i1.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll index 486efbe66a6fea..a2591e7dc35f03 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -67,7 +67,7 @@ define @vfptoui_nxv2i8_nxv2f16_unmasked( %v ; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i8.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i8.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -105,7 +105,7 @@ define @vfptoui_nxv2i16_nxv2f16_unmasked( ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i16.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i16.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -145,7 +145,7 @@ define @vfptoui_nxv2i32_nxv2f16_unmasked( ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i32.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i32.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -187,7 +187,7 @@ define @vfptoui_nxv2i64_nxv2f16_unmasked( ; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; ZVFHMIN-NEXT: vfwcvt.rtz.xu.f.v v8, v10 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i64.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i64.nxv2f16( %va, splat (i1 true), i32 %evl) ret %v } @@ -213,7 +213,7 @@ define @vfptoui_nxv2i8_nxv2f32_unmasked( % ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i8.nxv2f32( %va, shufflevector ( 
insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i8.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -237,7 +237,7 @@ define @vfptoui_nxv2i16_nxv2f32_unmasked( ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i16.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i16.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -259,7 +259,7 @@ define @vfptoui_nxv2i32_nxv2f32_unmasked( ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i32.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i32.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -283,7 +283,7 @@ define @vfptoui_nxv2i64_nxv2f32_unmasked( ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i64.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i64.nxv2f32( %va, splat (i1 true), i32 %evl) ret %v } @@ -313,7 +313,7 @@ define @vfptoui_nxv2i8_nxv2f64_unmasked( ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.fptoui.nxv2i8.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i8.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -339,7 +339,7 @@ define @vfptoui_nxv2i16_nxv2f64_unmasked( @llvm.vp.fptoui.nxv2i16.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i16.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -363,7 +363,7 @@ define @vfptoui_nxv2i32_nxv2f64_unmasked( @llvm.vp.fptoui.nxv2i32.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i32.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -385,7 +385,7 @@ define @vfptoui_nxv2i64_nxv2f64_unmasked( @llvm.vp.fptoui.nxv2i64.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv2i64.nxv2f64( %va, splat (i1 true), i32 %evl) ret %v } @@ -466,6 +466,6 @@ define @vfptoui_nxv32i32_nxv32f32_unmasked( @llvm.vp.fptoui.nxv32i32.nxv32f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.fptoui.nxv32i32.nxv32f32( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll index c6554561be3395..4e84a31d71b512 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -24,7 +24,7 @@ define @vfptrunc_nxv2f16_nxv2f32_unmasked( @llvm.vp.fptrunc.nxv2f16.nxv2f32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.fptrunc.nxv2f16.nxv2f32( %a, splat (i1 true), i32 %vl) ret %v } @@ -50,7 +50,7 @@ define @vfptrunc_nxv2f16_nxv2f64_unmasked( @llvm.vp.fptrunc.nxv2f16.nxv2f64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call 
@llvm.vp.fptrunc.nxv2f16.nxv2f64( %a, splat (i1 true), i32 %vl) ret %v } @@ -74,7 +74,7 @@ define @vfptrunc_nxv2f32_nxv2f64_unmasked( @llvm.vp.fptrunc.nxv2f64.nxv2f32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.fptrunc.nxv2f64.nxv2f32( %a, splat (i1 true), i32 %vl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll index 64887da78cb7fd..1ef0ed858d80a9 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-vp.ll @@ -22,8 +22,8 @@ define @vfwadd_same_operand( %arg, i32 s ; ZVFHMIN-NEXT: vfadd.vv v8, v9, v9 ; ZVFHMIN-NEXT: ret bb: - %tmp = call @llvm.vp.fpext.nxv2f32.nxv2f16( %arg, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %vl) - %tmp2 = call @llvm.vp.fadd.nxv2f32( %tmp, %tmp, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %vl) + %tmp = call @llvm.vp.fpext.nxv2f32.nxv2f16( %arg, splat (i1 true), i32 %vl) + %tmp2 = call @llvm.vp.fadd.nxv2f32( %tmp, %tmp, splat (i1 true), i32 %vl) ret %tmp2 } @@ -48,9 +48,9 @@ define @vfwadd_tu( %arg, @llvm.vp.fpext.nxv2f32.nxv2f16( %arg, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %arg2) - %tmp3 = call @llvm.vp.fadd.nxv2f32( %arg1, %tmp, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %arg2) - %tmp4 = call @llvm.vp.merge.nxv2f32( shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), %tmp3, %arg1, i32 %arg2) + %tmp = call @llvm.vp.fpext.nxv2f32.nxv2f16( %arg, splat (i1 true), i32 %arg2) + %tmp3 = call @llvm.vp.fadd.nxv2f32( %arg1, %tmp, splat (i1 true), i32 %arg2) + %tmp4 = call @llvm.vp.merge.nxv2f32( splat (i1 true), %tmp3, %arg1, i32 %arg2) ret %tmp4 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll index 07536407ace8d9..e1988c058fac34 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll @@ -597,7 +597,7 @@ define @combine_mul_add_imm1( %a, %a, shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) + %x = add %a, splat (i32 1) %y = mul %x, %b ret %y } @@ -608,7 +608,7 @@ define @combine_mul_add_imm1_2( %a, %a, shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) + %x = add %a, splat (i32 1) %y = mul %b, %x ret %y } diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll index a3c896ecca22a6..186ffb64e59025 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll @@ -597,7 +597,7 @@ define @combine_mul_sub_imm1( %a, shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer), %a + %x = sub splat (i32 1), %a %y = mul %x, %b ret %y } @@ -608,7 +608,7 @@ define @combine_mul_sub_imm1_2( %a, shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer), %a + %x = sub splat (i32 1), %a %y = mul %b, %x ret %y } diff --git a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll index b8a091b242591f..16abf2bd28accd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll @@ -66,7 +66,7 @@ define @vror_vi_nxv1i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = 
call @llvm.fshr.nxv1i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv1i8( %a, %a, splat (i8 1)) ret %x } @@ -84,7 +84,7 @@ define @vror_vi_rotl_nxv1i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 7 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv1i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv1i8( %a, %a, splat (i8 1)) ret %x } @@ -150,7 +150,7 @@ define @vror_vi_nxv2i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv2i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv2i8( %a, %a, splat (i8 1)) ret %x } @@ -168,7 +168,7 @@ define @vror_vi_rotl_nxv2i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 7 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv2i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv2i8( %a, %a, splat (i8 1)) ret %x } @@ -234,7 +234,7 @@ define @vror_vi_nxv4i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv4i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv4i8( %a, %a, splat (i8 1)) ret %x } @@ -252,7 +252,7 @@ define @vror_vi_rotl_nxv4i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 7 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv4i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv4i8( %a, %a, splat (i8 1)) ret %x } @@ -318,7 +318,7 @@ define @vror_vi_nxv8i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv8i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv8i8( %a, %a, splat (i8 1)) ret %x } @@ -336,7 +336,7 @@ define @vror_vi_rotl_nxv8i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 7 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv8i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv8i8( %a, %a, splat (i8 1)) ret %x } @@ -402,7 +402,7 @@ define @vror_vi_nxv16i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv16i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv16i8( %a, %a, splat (i8 1)) ret %x } @@ -420,7 +420,7 @@ define @vror_vi_rotl_nxv16i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 7 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv16i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv16i8( %a, %a, splat (i8 1)) ret %x } @@ -486,7 +486,7 @@ define @vror_vi_nxv32i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv32i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, 
zeroinitializer)) + %x = call @llvm.fshr.nxv32i8( %a, %a, splat (i8 1)) ret %x } @@ -504,7 +504,7 @@ define @vror_vi_rotl_nxv32i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 7 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv32i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv32i8( %a, %a, splat (i8 1)) ret %x } @@ -570,7 +570,7 @@ define @vror_vi_nxv64i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv64i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv64i8( %a, %a, splat (i8 1)) ret %x } @@ -588,7 +588,7 @@ define @vror_vi_rotl_nxv64i8( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 7 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv64i8( %a, %a, shufflevector( insertelement( poison, i8 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv64i8( %a, %a, splat (i8 1)) ret %x } @@ -654,7 +654,7 @@ define @vror_vi_nxv1i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv1i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv1i16( %a, %a, splat (i16 1)) ret %x } @@ -672,7 +672,7 @@ define @vror_vi_rotl_nxv1i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 15 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv1i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv1i16( %a, %a, splat (i16 1)) ret %x } @@ -738,7 +738,7 @@ define @vror_vi_nxv2i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv2i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv2i16( %a, %a, splat (i16 1)) ret %x } @@ -756,7 +756,7 @@ define @vror_vi_rotl_nxv2i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 15 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv2i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv2i16( %a, %a, splat (i16 1)) ret %x } @@ -822,7 +822,7 @@ define @vror_vi_nxv4i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv4i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv4i16( %a, %a, splat (i16 1)) ret %x } @@ -840,7 +840,7 @@ define @vror_vi_rotl_nxv4i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 15 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv4i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv4i16( %a, %a, splat (i16 1)) ret %x } @@ -906,7 +906,7 @@ define @vror_vi_nxv8i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv8i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv8i16( %a, %a, 
splat (i16 1)) ret %x } @@ -924,7 +924,7 @@ define @vror_vi_rotl_nxv8i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 15 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv8i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv8i16( %a, %a, splat (i16 1)) ret %x } @@ -990,7 +990,7 @@ define @vror_vi_nxv16i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv16i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv16i16( %a, %a, splat (i16 1)) ret %x } @@ -1008,7 +1008,7 @@ define @vror_vi_rotl_nxv16i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 15 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv16i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv16i16( %a, %a, splat (i16 1)) ret %x } @@ -1074,7 +1074,7 @@ define @vror_vi_nxv32i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv32i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv32i16( %a, %a, splat (i16 1)) ret %x } @@ -1092,7 +1092,7 @@ define @vror_vi_rotl_nxv32i16( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 15 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv32i16( %a, %a, shufflevector( insertelement( poison, i16 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv32i16( %a, %a, splat (i16 1)) ret %x } @@ -1171,7 +1171,7 @@ define @vror_vi_nxv1i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv1i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv1i32( %a, %a, splat (i32 1)) ret %x } @@ -1189,7 +1189,7 @@ define @vror_vi_rotl_nxv1i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 31 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv1i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv1i32( %a, %a, splat (i32 1)) ret %x } @@ -1268,7 +1268,7 @@ define @vror_vi_nxv2i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv2i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv2i32( %a, %a, splat (i32 1)) ret %x } @@ -1286,7 +1286,7 @@ define @vror_vi_rotl_nxv2i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 31 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv2i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv2i32( %a, %a, splat (i32 1)) ret %x } @@ -1365,7 +1365,7 @@ define @vror_vi_nxv4i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv4i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv4i32( %a, %a, splat (i32 1)) ret %x } 
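Every hunk in this file applies the same mechanical rewrite, so a single standalone sketch may help when scanning the rest. The IR below is illustrative only, with a hypothetical function name; it is not one of the tests in this patch:

; Old spelling, a ConstantExpr chain denoting a rotate amount of 1 in every lane:
;   %x = call <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer))
; New spelling, the same constant written directly:
declare <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)

define <vscale x 4 x i32> @rotr_by_one_sketch(<vscale x 4 x i32> %a) {
  %x = call <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat (i32 1))
  ret <vscale x 4 x i32> %x
}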
@@ -1383,7 +1383,7 @@ define @vror_vi_rotl_nxv4i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 31 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv4i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv4i32( %a, %a, splat (i32 1)) ret %x } @@ -1462,7 +1462,7 @@ define @vror_vi_nxv8i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv8i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv8i32( %a, %a, splat (i32 1)) ret %x } @@ -1480,7 +1480,7 @@ define @vror_vi_rotl_nxv8i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 31 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv8i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv8i32( %a, %a, splat (i32 1)) ret %x } @@ -1559,7 +1559,7 @@ define @vror_vi_nxv16i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv16i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv16i32( %a, %a, splat (i32 1)) ret %x } @@ -1577,7 +1577,7 @@ define @vror_vi_rotl_nxv16i32( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 31 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv16i32( %a, %a, shufflevector( insertelement( poison, i32 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv16i32( %a, %a, splat (i32 1)) ret %x } @@ -1657,7 +1657,7 @@ define @vror_vi_nxv1i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv1i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv1i64( %a, %a, splat (i64 1)) ret %x } @@ -1676,7 +1676,7 @@ define @vror_vi_rotl_nxv1i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 63 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv1i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv1i64( %a, %a, splat (i64 1)) ret %x } @@ -1756,7 +1756,7 @@ define @vror_vi_nxv2i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv2i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv2i64( %a, %a, splat (i64 1)) ret %x } @@ -1775,7 +1775,7 @@ define @vror_vi_rotl_nxv2i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 63 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv2i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv2i64( %a, %a, splat (i64 1)) ret %x } @@ -1855,7 +1855,7 @@ define @vror_vi_nxv4i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv4i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv4i64( %a, %a, splat (i64 1)) ret %x } @@ -1874,7 +1874,7 @@ define 
@vror_vi_rotl_nxv4i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 63 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv4i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv4i64( %a, %a, splat (i64 1)) ret %x } @@ -1954,7 +1954,7 @@ define @vror_vi_nxv8i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 1 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshr.nxv8i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshr.nxv8i64( %a, %a, splat (i64 1)) ret %x } @@ -1973,6 +1973,6 @@ define @vror_vi_rotl_nxv8i64( %a) { ; CHECK-ZVKB-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-ZVKB-NEXT: vror.vi v8, v8, 63 ; CHECK-ZVKB-NEXT: ret - %x = call @llvm.fshl.nxv8i64( %a, %a, shufflevector( insertelement( poison, i64 1, i32 0), poison, zeroinitializer)) + %x = call @llvm.fshl.nxv8i64( %a, %a, splat (i64 1)) ret %x } diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll index 1b568bf8801b10..0d52dd794fd56f 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll @@ -762,7 +762,7 @@ define @select_one( %x, %y, ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmorn.mm v0, v8, v0 ; CHECK-NEXT: ret - %a = call @llvm.vp.select.nxv2i1( %x, %y, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %a = call @llvm.vp.select.nxv2i1( %x, %y, splat (i1 true), i32 %evl) ret %a } @@ -782,7 +782,7 @@ define @select_x_one( %x, % ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmorn.mm v0, v8, v0 ; CHECK-NEXT: ret - %a = call @llvm.vp.select.nxv2i1( %x, %y, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %a = call @llvm.vp.select.nxv2i1( %x, %y, splat (i1 true), i32 %evl) ret %a } @@ -802,7 +802,7 @@ define @select_one_x( %x, % ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret - %a = call @llvm.vp.select.nxv2i1( %x, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), %y, i32 %evl) + %a = call @llvm.vp.select.nxv2i1( %x, splat (i1 true), %y, i32 %evl) ret %a } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll index 66f9e8dc9c5fc2..04aed5d81db99c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll @@ -22,7 +22,7 @@ define @vsext_nxv2i1_nxv2i16_unmasked( %a, i ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i16.nxv2i1( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i16.nxv2i1( %a, splat (i1 true), i32 %vl) ret %v } @@ -46,7 +46,7 @@ define @vsext_nxv2i1_nxv2i32_unmasked( %a, i ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i32.nxv2i1( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i32.nxv2i1( %a, splat (i1 true), i32 %vl) ret %v } @@ -70,6 +70,6 @@ define @vsext_nxv2i1_nxv2i64_unmasked( %a, i ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i64.nxv2i1( %a, shufflevector ( 
insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i64.nxv2i1( %a, splat (i1 true), i32 %vl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll index 8aaa74c8e21fc6..834e7dd85aea06 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll @@ -22,7 +22,7 @@ define @vsext_nxv2i8_nxv2i16_unmasked( %a, i ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i16.nxv2i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i16.nxv2i8( %a, splat (i1 true), i32 %vl) ret %v } @@ -46,7 +46,7 @@ define @vsext_nxv2i8_nxv2i32_unmasked( %a, i ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i32.nxv2i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i32.nxv2i8( %a, splat (i1 true), i32 %vl) ret %v } @@ -70,7 +70,7 @@ define @vsext_nxv2i8_nxv2i64_unmasked( %a, i ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i64.nxv2i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i64.nxv2i8( %a, splat (i1 true), i32 %vl) ret %v } @@ -94,7 +94,7 @@ define @vsext_nxv2i16_nxv2i32_unmasked( %a, ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i32.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i32.nxv2i16( %a, splat (i1 true), i32 %vl) ret %v } @@ -118,7 +118,7 @@ define @vsext_nxv2i16_nxv2i64_unmasked( %a, ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i64.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i64.nxv2i16( %a, splat (i1 true), i32 %vl) ret %v } @@ -142,7 +142,7 @@ define @vsext_nxv2i32_nxv2i64_unmasked( %a, ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv2i64.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv2i64.nxv2i32( %a, splat (i1 true), i32 %vl) ret %v } @@ -195,6 +195,6 @@ define @vsext_nxv32i8_nxv32i32_unmasked( % ; CHECK-NEXT: vsext.vf4 v24, v8 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret - %v = call @llvm.vp.sext.nxv32i32.nxv32i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.sext.nxv32i32.nxv32i8( %a, splat (i1 true), i32 %vl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll index 43451f446b373e..6e09ceefb72920 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll @@ -25,7 +25,7 @@ define @vsitofp_nxv2f16_nxv2i1_unmasked( %v ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f16.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i1( %va, splat (i1 true), i32 %evl) ret %v } @@ 
-52,7 +52,7 @@ define @vsitofp_nxv2f32_nxv2i1_unmasked( % ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f32.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i1( %va, splat (i1 true), i32 %evl) ret %v } @@ -79,6 +79,6 @@ define @vsitofp_nxv2f64_nxv2i1_unmasked( ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f64.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i1( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll index 62848ea2279a30..016a43784733d0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll @@ -69,7 +69,7 @@ define @vsitofp_nxv2f16_nxv2i8_unmasked( %v ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f16.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i8( %va, splat (i1 true), i32 %evl) ret %v } @@ -107,7 +107,7 @@ define @vsitofp_nxv2f16_nxv2i16_unmasked( ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f16.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i16( %va, splat (i1 true), i32 %evl) ret %v } @@ -147,7 +147,7 @@ define @vsitofp_nxv2f16_nxv2i32_unmasked( ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f16.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i32( %va, splat (i1 true), i32 %evl) ret %v } @@ -189,7 +189,7 @@ define @vsitofp_nxv2f16_nxv2i64_unmasked( ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f16.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f16.nxv2i64( %va, splat (i1 true), i32 %evl) ret %v } @@ -213,7 +213,7 @@ define @vsitofp_nxv2f32_nxv2i8_unmasked( % ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f32.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i8( %va, splat (i1 true), i32 %evl) ret %v } @@ -237,7 +237,7 @@ define @vsitofp_nxv2f32_nxv2i16_unmasked( ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f32.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i16( %va, splat (i1 true), i32 %evl) ret %v } @@ -259,7 +259,7 @@ define @vsitofp_nxv2f32_nxv2i32_unmasked( ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f32.nxv2i32( %va, shufflevector ( insertelement 
( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i32( %va, splat (i1 true), i32 %evl) ret %v } @@ -283,7 +283,7 @@ define @vsitofp_nxv2f32_nxv2i64_unmasked( ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f32.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f32.nxv2i64( %va, splat (i1 true), i32 %evl) ret %v } @@ -307,7 +307,7 @@ define @vsitofp_nxv2f64_nxv2i8_unmasked( ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.sitofp.nxv2f64.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i8( %va, splat (i1 true), i32 %evl) ret %v } @@ -331,7 +331,7 @@ define @vsitofp_nxv2f64_nxv2i16_unmasked( @llvm.vp.sitofp.nxv2f64.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i16( %va, splat (i1 true), i32 %evl) ret %v } @@ -355,7 +355,7 @@ define @vsitofp_nxv2f64_nxv2i32_unmasked( @llvm.vp.sitofp.nxv2f64.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i32( %va, splat (i1 true), i32 %evl) ret %v } @@ -377,7 +377,7 @@ define @vsitofp_nxv2f64_nxv2i64_unmasked( @llvm.vp.sitofp.nxv2f64.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv2f64.nxv2i64( %va, splat (i1 true), i32 %evl) ret %v } @@ -486,6 +486,6 @@ define @vsitofp_nxv32f32_nxv32i32_unmasked( @llvm.vp.sitofp.nxv32f32.nxv32i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.sitofp.nxv32f32.nxv32i32( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll index 4eb80e36001aa9..ad8097631acd33 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll @@ -22,7 +22,7 @@ define @vtrunc_nxv2i1_nxv2i16_unmasked( %a, ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i1.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i1.nxv2i16( %a, splat (i1 true), i32 %vl) ret %v } @@ -46,7 +46,7 @@ define @vtrunc_nxv2i1_nxv2i32_unmasked( %a, ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i1.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i1.nxv2i32( %a, splat (i1 true), i32 %vl) ret %v } @@ -71,6 +71,6 @@ define @vtrunc_nxv2i1_nxv2i64_unmasked( %a, ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i1.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i1.nxv2i64( %a, splat (i1 true), i32 %vl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll index a624a42b3873bb..a7b4d6616b7b5d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll @@ -44,7 +44,7 @@ define @vtrunc_nxv2i8_nxv2i16_unmasked( %a, ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i8.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i8.nxv2i16( %a, splat (i1 true), i32 %vl) ret %v } @@ -70,7 +70,7 @@ define @vtrunc_nxv2i8_nxv2i32_unmasked( %a, ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i8.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i8.nxv2i32( %a, splat (i1 true), i32 %vl) ret %v } @@ -100,7 +100,7 @@ define @vtrunc_nxv2i8_nxv2i64_unmasked( %a, ; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i8.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i8.nxv2i64( %a, splat (i1 true), i32 %vl) ret %v } @@ -122,7 +122,7 @@ define @vtrunc_nxv2i16_nxv2i32_unmasked( %a ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i16.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i16.nxv2i32( %a, splat (i1 true), i32 %vl) ret %v } @@ -148,7 +148,7 @@ define @vtrunc_nxv2i16_nxv2i64_unmasked( %a ; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i16.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i16.nxv2i64( %a, splat (i1 true), i32 %vl) ret %v } @@ -205,7 +205,7 @@ define @vtrunc_nxv2i32_nxv2i64_unmasked( %a ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.trunc.nxv2i32.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.trunc.nxv2i32.nxv2i64( %a, splat (i1 true), i32 %vl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll index 128bb80971ac7e..cf4bb161ea75b0 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll @@ -25,7 +25,7 @@ define @vuitofp_nxv2f16_nxv2i1_unmasked( %v ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f16.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f16.nxv2i1( %va, splat (i1 true), i32 %evl) ret %v } @@ -52,7 +52,7 @@ define @vuitofp_nxv2f32_nxv2i1_unmasked( % ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f32.nxv2i1( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f32.nxv2i1( %va, splat (i1 true), i32 %evl) ret %v } @@ -79,6 +79,6 @@ define @vuitofp_nxv2f64_nxv2i1_unmasked( ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f64.nxv2i1( %va, 
shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f64.nxv2i1( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll index 8ca27484d69fcb..668d9373b81d3b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll @@ -69,7 +69,7 @@ define @vuitofp_nxv2f16_nxv2i8_unmasked( %v ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f16.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f16.nxv2i8( %va, splat (i1 true), i32 %evl) ret %v } @@ -107,7 +107,7 @@ define @vuitofp_nxv2f16_nxv2i16_unmasked( ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f16.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f16.nxv2i16( %va, splat (i1 true), i32 %evl) ret %v } @@ -147,7 +147,7 @@ define @vuitofp_nxv2f16_nxv2i32_unmasked( ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f16.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f16.nxv2i32( %va, splat (i1 true), i32 %evl) ret %v } @@ -189,7 +189,7 @@ define @vuitofp_nxv2f16_nxv2i64_unmasked( ; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10 ; ZVFHMIN-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f16.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f16.nxv2i64( %va, splat (i1 true), i32 %evl) ret %v } @@ -213,7 +213,7 @@ define @vuitofp_nxv2f32_nxv2i8_unmasked( % ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f32.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f32.nxv2i8( %va, splat (i1 true), i32 %evl) ret %v } @@ -237,7 +237,7 @@ define @vuitofp_nxv2f32_nxv2i16_unmasked( ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f32.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f32.nxv2i16( %va, splat (i1 true), i32 %evl) ret %v } @@ -259,7 +259,7 @@ define @vuitofp_nxv2f32_nxv2i32_unmasked( ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f32.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f32.nxv2i32( %va, splat (i1 true), i32 %evl) ret %v } @@ -283,7 +283,7 @@ define @vuitofp_nxv2f32_nxv2i64_unmasked( ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f32.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f32.nxv2i64( %va, splat (i1 true), i32 %evl) ret %v } @@ -307,7 +307,7 @@ define 
@vuitofp_nxv2f64_nxv2i8_unmasked( ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.uitofp.nxv2f64.nxv2i8( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f64.nxv2i8( %va, splat (i1 true), i32 %evl) ret %v } @@ -331,7 +331,7 @@ define @vuitofp_nxv2f64_nxv2i16_unmasked( @llvm.vp.uitofp.nxv2f64.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f64.nxv2i16( %va, splat (i1 true), i32 %evl) ret %v } @@ -355,7 +355,7 @@ define @vuitofp_nxv2f64_nxv2i32_unmasked( @llvm.vp.uitofp.nxv2f64.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f64.nxv2i32( %va, splat (i1 true), i32 %evl) ret %v } @@ -377,7 +377,7 @@ define @vuitofp_nxv2f64_nxv2i64_unmasked( @llvm.vp.uitofp.nxv2f64.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv2f64.nxv2i64( %va, splat (i1 true), i32 %evl) ret %v } @@ -486,6 +486,6 @@ define @vuitofp_nxv32f32_nxv32i32_unmasked( @llvm.vp.uitofp.nxv32f32.nxv32i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) + %v = call @llvm.vp.uitofp.nxv32f32.nxv32i32( %va, splat (i1 true), i32 %evl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll index ad7ad991e082c8..02af09f028fc12 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll @@ -12,7 +12,7 @@ define @vwadd_wv_mask_v8i32( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) %a = select %mask, %x, zeroinitializer %sa = sext %a to %ret = add %sa, %y @@ -29,7 +29,7 @@ define @vwaddu_wv_mask_v8i32( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) %a = select %mask, %x, zeroinitializer %sa = zext %a to %ret = add %sa, %y @@ -47,7 +47,7 @@ define @vwaddu_vv_mask_v8i32( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) %a = select %mask, %x, zeroinitializer %sa = zext %a to %sy = zext %y to @@ -65,7 +65,7 @@ define @vwadd_wv_mask_v8i32_commutative( %x ; CHECK-NEXT: vwadd.wv v16, v16, v8, v0.t ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret - %mask = icmp slt %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) %a = select %mask, %x, zeroinitializer %sa = sext %a to %ret = add %y, %sa @@ -82,8 +82,8 @@ define @vwadd_wv_mask_v8i32_nonzero( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) - %a = select %mask, %x, shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) + %a = select %mask, %x, splat (i32 1) %sa = sext %a to %ret = add %sa, %y ret %ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll index c3ffee6969d702..a0b7726d3cb5e6 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-vp.ll @@ -13,9 +13,9 @@ define @vwadd_tu( %arg, % ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret bb: - %tmp = call 
@llvm.vp.sext.nxv2i32.nxv2i8( %arg, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %arg2) - %tmp3 = call @llvm.vp.add.nxv2i32( %arg1, %tmp, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %arg2) - %tmp4 = call @llvm.vp.merge.nxv2i32( shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), %tmp3, %arg1, i32 %arg2) + %tmp = call @llvm.vp.sext.nxv2i32.nxv2i8( %arg, splat (i1 true), i32 %arg2) + %tmp3 = call @llvm.vp.add.nxv2i32( %arg1, %tmp, splat (i1 true), i32 %arg2) + %tmp4 = call @llvm.vp.merge.nxv2i32( splat (i1 true), %tmp3, %arg1, i32 %arg2) ret %tmp4 } @@ -31,9 +31,9 @@ define @vwaddu_tu( %arg, ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret bb: - %tmp = call @llvm.vp.zext.nxv2i32.nxv2i8( %arg, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %arg2) - %tmp3 = call @llvm.vp.add.nxv2i32( %arg1, %tmp, shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), i32 %arg2) - %tmp4 = call @llvm.vp.merge.nxv2i32( shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer), %tmp3, %arg1, i32 %arg2) + %tmp = call @llvm.vp.zext.nxv2i32.nxv2i8( %arg, splat (i1 true), i32 %arg2) + %tmp3 = call @llvm.vp.add.nxv2i32( %arg1, %tmp, splat (i1 true), i32 %arg2) + %tmp4 = call @llvm.vp.merge.nxv2i32( splat (i1 true), %tmp3, %arg1, i32 %arg2) ret %tmp4 } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll index 8a0af38f724c4c..770bb566c764cd 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll @@ -236,7 +236,7 @@ define @vwsll_vi_nxv2i64( %a) { ; CHECK-ZVBB-NEXT: vmv2r.v v8, v10 ; CHECK-ZVBB-NEXT: ret %x = zext %a to - %z = shl %x, shufflevector( insertelement( poison, i64 2, i32 0), poison, zeroinitializer) + %z = shl %x, splat (i64 2) ret %z } @@ -444,7 +444,7 @@ define @vwsll_vi_nxv4i32( %a) { ; CHECK-ZVBB-NEXT: vmv2r.v v8, v10 ; CHECK-ZVBB-NEXT: ret %x = zext %a to - %z = shl %x, shufflevector( insertelement( poison, i32 2, i32 0), poison, zeroinitializer) + %z = shl %x, splat (i32 2) ret %z } @@ -624,6 +624,6 @@ define @vwsll_vi_nxv8i16( %a) { ; CHECK-ZVBB-NEXT: vmv2r.v v8, v10 ; CHECK-ZVBB-NEXT: ret %x = zext %a to - %z = shl %x, shufflevector( insertelement( poison, i16 2, i32 0), poison, zeroinitializer) + %z = shl %x, splat (i16 2) ret %z } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll index e7ac8ee1756415..bb3076b3a945e8 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll @@ -263,7 +263,7 @@ define @vwsll_vi_nxv2i64( %a, %a to - %z = call @llvm.vp.shl.nxv2i64( %x, shufflevector( insertelement( poison, i64 2, i32 0), poison, zeroinitializer), %m, i32 %vl) + %z = call @llvm.vp.shl.nxv2i64( %x, splat (i64 2), %m, i32 %vl) ret %z } @@ -497,7 +497,7 @@ define @vwsll_vi_nxv4i32( %a, %a to - %z = call @llvm.vp.shl.nxv4i32( %x, shufflevector( insertelement( poison, i32 2, i32 0), poison, zeroinitializer), %m, i32 %vl) + %z = call @llvm.vp.shl.nxv4i32( %x, splat (i32 2), %m, i32 %vl) ret %z } @@ -703,6 +703,6 @@ define @vwsll_vi_nxv8i16( %a, %a to - %z = call @llvm.vp.shl.nxv8i16( %x, shufflevector( insertelement( poison, i16 2, i32 0), poison, zeroinitializer), %m, i32 %vl) + %z = call @llvm.vp.shl.nxv8i16( %x, splat (i16 2), %m, i32 %vl) ret %z } diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-mask-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vwsub-mask-sdnode.ll index 0cc0063c1d41cc..04ece9d94880ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-mask-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-mask-sdnode.ll @@ -12,7 +12,7 @@ define @vwsub_wv_mask_v8i32( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) %a = select %mask, %x, zeroinitializer %sa = sext %a to %ret = sub %y, %sa @@ -29,7 +29,7 @@ define @vwsubu_wv_mask_v8i32( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) %a = select %mask, %x, zeroinitializer %sa = zext %a to %ret = sub %y, %sa @@ -47,7 +47,7 @@ define @vwsubu_vv_mask_v8i32( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) %a = select %mask, %x, zeroinitializer %sa = zext %a to %sy = zext %y to @@ -65,8 +65,8 @@ define @vwsub_wv_mask_v8i32_nonzero( %x, %x, shufflevector ( insertelement ( poison, i32 42, i64 0), poison, zeroinitializer) - %a = select %mask, %x, shufflevector ( insertelement ( poison, i32 1, i64 0), poison, zeroinitializer) + %mask = icmp slt %x, splat (i32 42) + %a = select %mask, %x, splat (i32 1) %sa = sext %a to %ret = sub %y, %sa ret %ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll index 41668d806ec7e2..e14236c0258c46 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll @@ -22,7 +22,7 @@ define @vzext_nxv2i1_nxv2i16_unmasked( %a, i ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i16.nxv2i1( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i16.nxv2i1( %a, splat (i1 true), i32 %vl) ret %v } @@ -46,7 +46,7 @@ define @vzext_nxv2i1_nxv2i32_unmasked( %a, i ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i32.nxv2i1( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i32.nxv2i1( %a, splat (i1 true), i32 %vl) ret %v } @@ -70,6 +70,6 @@ define @vzext_nxv2i1_nxv2i64_unmasked( %a, i ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i64.nxv2i1( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i64.nxv2i1( %a, splat (i1 true), i32 %vl) ret %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll index 365c221c9b9fc6..400f89b1ef77d1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll @@ -22,7 +22,7 @@ define @vzext_nxv2i8_nxv2i16_unmasked( %a, i ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i16.nxv2i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i16.nxv2i8( %a, splat (i1 true), i32 %vl) ret %v } @@ -46,7 +46,7 @@ define @vzext_nxv2i8_nxv2i32_unmasked( %a, i ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i32.nxv2i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call 
@llvm.vp.zext.nxv2i32.nxv2i8( %a, splat (i1 true), i32 %vl) ret %v } @@ -70,7 +70,7 @@ define @vzext_nxv2i8_nxv2i64_unmasked( %a, i ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i64.nxv2i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i64.nxv2i8( %a, splat (i1 true), i32 %vl) ret %v } @@ -94,7 +94,7 @@ define @vzext_nxv2i16_nxv2i32_unmasked( %a, ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i32.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i32.nxv2i16( %a, splat (i1 true), i32 %vl) ret %v } @@ -118,7 +118,7 @@ define @vzext_nxv2i16_nxv2i64_unmasked( %a, ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i64.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i64.nxv2i16( %a, splat (i1 true), i32 %vl) ret %v } @@ -142,7 +142,7 @@ define @vzext_nxv2i32_nxv2i64_unmasked( %a, ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv2i64.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv2i64.nxv2i32( %a, splat (i1 true), i32 %vl) ret %v } @@ -195,6 +195,6 @@ define @vzext_nxv32i8_nxv32i32_unmasked( % ; CHECK-NEXT: vzext.vf4 v24, v8 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret - %v = call @llvm.vp.zext.nxv32i32.nxv32i8( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) + %v = call @llvm.vp.zext.nxv32i32.nxv32i8( %a, splat (i1 true), i32 %vl) ret %v }
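The VP-intrinsic tests above all rewrite the all-true mask operand in the same way. As a closing sketch, with a hypothetical function name and assumed types rather than anything taken from these files:

; Old all-true mask, built by inserting into undef and splatting:
;   shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer)
declare <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @zext_unmasked_sketch(<vscale x 2 x i8> %a, i32 %evl) {
  ; splat (i1 true) denotes the same all-true mask; in LLVM IR it is sugar
  ; for the insert-and-shuffle pattern with a poison (not undef) base vector.
  %v = call <vscale x 2 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}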