[LLVM][SVE] Move ADDVL isel patterns under UseScalarIncVL feature flag. #71173

Merged 1 commit on Nov 3, 2023
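The gist of the change: the two scalar ADDVL selection patterns that previously only required HasSVEorSME are now guarded by the UseScalarIncVL predicate, alongside the existing scalar INCH/INCW/INCD patterns, and the now-duplicated bare ADDVL pattern further down the file is dropped. A minimal sketch of the kind of IR these patterns match (my assumption, modeled on the incb_scalar_i64 test in sve-vl-arith.ll, not copied from the patch):

define i64 @incb_scalar_i64(i64 %a) {
  ; 16 * vscale, i.e. one SVE register's worth of bytes; the DAG folds the
  ; multiply into a VSCALE node that sve_rdvl_imm matches with immediate 1.
  %vscale = call i64 @llvm.vscale.i64()
  %bytes = mul i64 %vscale, 16
  %add = add i64 %a, %bytes
  ret i64 %add
}

declare i64 @llvm.vscale.i64()

With UseScalarIncVL this configuration should still select addvl x0, x0, #1; without it, the updated NO_SCALAR_INC check lines below now expect rdvl x8, #1 followed by a plain add instead.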
19 changes: 8 additions & 11 deletions llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2423,14 +2423,6 @@ let Predicates = [HasSVEorSME] in {
}

let AddedComplexity = 5 in {
def : Pat<(add GPR64:$op, (vscale (sve_rdvl_imm i32:$imm))),
(ADDVL_XXI GPR64:$op, $imm)>;

def : Pat<(add GPR32:$op, (i32 (trunc (vscale (sve_rdvl_imm i32:$imm))))),
(i32 (EXTRACT_SUBREG (ADDVL_XXI (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
GPR32:$op, sub_32), $imm),
sub_32))>;

def : Pat<(nxv8i16 (add ZPR:$op, (nxv8i16 (splat_vector (i32 (trunc (vscale (sve_cnth_imm i32:$imm)))))))),
(INCH_ZPiI ZPR:$op, 31, $imm)>;
def : Pat<(nxv4i32 (add ZPR:$op, (nxv4i32 (splat_vector (i32 (trunc (vscale (sve_cntw_imm i32:$imm)))))))),
@@ -2447,6 +2439,14 @@ let Predicates = [HasSVEorSME] in {
}

let Predicates = [HasSVEorSME, UseScalarIncVL], AddedComplexity = 5 in {
def : Pat<(add GPR64:$op, (vscale (sve_rdvl_imm i32:$imm))),
(ADDVL_XXI GPR64:$op, $imm)>;

def : Pat<(add GPR32:$op, (i32 (trunc (vscale (sve_rdvl_imm i32:$imm))))),
(i32 (EXTRACT_SUBREG (ADDVL_XXI (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
GPR32:$op, sub_32), $imm),
sub_32))>;

def : Pat<(add GPR64:$op, (vscale (sve_cnth_imm i32:$imm))),
(INCH_XPiI GPR64:$op, 31, $imm)>;
def : Pat<(add GPR64:$op, (vscale (sve_cntw_imm i32:$imm))),
@@ -2488,9 +2488,6 @@ let Predicates = [HasSVEorSME] in {
sub_32))>;
}

def : Pat<(add GPR64:$op, (vscale (sve_rdvl_imm i32:$imm))),
(ADDVL_XXI GPR64:$op, $imm)>;

// FIXME: BigEndian requires an additional REV instruction to satisfy the
// constraint that none of the bits change when stored to memory as one
// type, and reloaded as another type.
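For the 32-bit pattern moved above, a similarly hypothetical sketch (assumed shape, mirroring incb_scalar_i32 in sve-vl-arith.ll): a truncated vscale-scaled increment, which only takes the ADDVL route via INSERT_SUBREG/EXTRACT_SUBREG when UseScalarIncVL is set and otherwise now falls back to rdvl plus a 32-bit add, as the updated NO_SCALAR_INC lines in that test show.

define i32 @incb_scalar_i32(i32 %a) {
  ; 48 * vscale truncated to i32, i.e. (trunc (vscale (sve_rdvl_imm 3)));
  ; the constant 48 is an illustrative choice matching rdvl #3.
  %vscale = call i64 @llvm.vscale.i64()
  %bytes = mul i64 %vscale, 48
  %trunc = trunc i64 %bytes to i32
  %add = add i32 %a, %trunc
  ret i32 %add
}

declare i64 @llvm.vscale.i64()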
@@ -195,13 +195,14 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: neg x10, x9
; CHECK-NEXT: mov w11, #1000 // =0x3e8
; CHECK-NEXT: rdvl x13, #2
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: and x10, x10, x11
; CHECK-NEXT: rdvl x11, #4
; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: addvl x12, x1, #2
; CHECK-NEXT: addvl x13, x0, #2
; CHECK-NEXT: rdvl x11, #4
; CHECK-NEXT: add x12, x1, x13
; CHECK-NEXT: add x13, x0, x13
; CHECK-NEXT: mov z2.d, z1.d
; CHECK-NEXT: mov z3.d, z0.d
; CHECK-NEXT: .LBB2_1: // %vector.body
21 changes: 11 additions & 10 deletions llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -351,9 +351,9 @@ define <vscale x 16 x float> @splice_nxv16f32_16(<vscale x 16 x float> %a, <vsca
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-8
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, #16 // =0x10
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x8, #16
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: mov x9, sp
@@ -457,7 +457,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_neg17(<vscale x 16 x i8> %a, <vscale x
; CHECK-NEXT: mov w9, #17 // =0x11
; CHECK-NEXT: mov x10, sp
; CHECK-NEXT: cmp x8, #17
; CHECK-NEXT: addvl x10, x10, #1
; CHECK-NEXT: add x10, x10, x8
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: sub x8, x10, x8
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
@@ -502,7 +502,7 @@ define <vscale x 8 x i16> @splice_nxv8i16_neg9(<vscale x 8 x i16> %a, <vscale x
; CHECK-NEXT: mov w9, #18 // =0x12
; CHECK-NEXT: mov x10, sp
; CHECK-NEXT: cmp x8, #18
; CHECK-NEXT: addvl x10, x10, #1
; CHECK-NEXT: add x10, x10, x8
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: sub x8, x10, x8
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
@@ -613,7 +613,7 @@ define <vscale x 8 x half> @splice_nxv8f16_neg9(<vscale x 8 x half> %a, <vscale
; CHECK-NEXT: mov w9, #18 // =0x12
; CHECK-NEXT: mov x10, sp
; CHECK-NEXT: cmp x8, #18
; CHECK-NEXT: addvl x10, x10, #1
; CHECK-NEXT: add x10, x10, x8
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: sub x8, x10, x8
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
@@ -779,9 +779,10 @@ define <vscale x 8 x i32> @splice_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov x9, sp
; CHECK-NEXT: add x8, x9, x8
; CHECK-NEXT: mov x9, #-8 // =0xfffffffffffffff8
; CHECK-NEXT: addvl x8, x8, #2
; CHECK-NEXT: sub x10, x8, #32
; CHECK-NEXT: st1w { z1.s }, p0, [sp, #1, mul vl]
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
@@ -807,9 +808,9 @@ define <vscale x 16 x float> @splice_nxv16f32_neg17(<vscale x 16 x float> %a, <v
; CHECK-NEXT: mov w9, #68 // =0x44
; CHECK-NEXT: mov x10, sp
; CHECK-NEXT: cmp x8, #68
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: addvl x9, x10, #4
; CHECK-NEXT: sub x8, x9, x8
; CHECK-NEXT: csel x9, x8, x9, lo
; CHECK-NEXT: add x8, x10, x8
; CHECK-NEXT: sub x8, x8, x9
; CHECK-NEXT: st1w { z3.s }, p0, [sp, #3, mul vl]
; CHECK-NEXT: st1w { z2.s }, p0, [sp, #2, mul vl]
; CHECK-NEXT: st1w { z1.s }, p0, [sp, #1, mul vl]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -215,9 +215,9 @@ define <16 x i8> @extract_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec) nounwind
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, #-16 // =0xfffffffffffffff0
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, #16 // =0x10
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, #16
; CHECK-NEXT: cmp x8, #16
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: mov x9, sp
6 changes: 4 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-gep.ll
@@ -4,7 +4,8 @@
define <vscale x 2 x i64>* @scalar_of_scalable_1(<vscale x 2 x i64>* %base) {
; CHECK-LABEL: scalar_of_scalable_1:
; CHECK: // %bb.0:
; CHECK-NEXT: addvl x0, x0, #4
; CHECK-NEXT: rdvl x8, #4
; CHECK-NEXT: add x0, x0, x8
; CHECK-NEXT: ret
%d = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 4
ret <vscale x 2 x i64>* %d
@@ -202,7 +203,8 @@ define <vscale x 2 x i64*> @scalable_of_fixed_5_i64(i64* %base, <vscale x 2 x i3
define <vscale x 2 x <vscale x 2 x i64>*> @scalable_of_scalable_1(<vscale x 2 x i64>* %base) {
; CHECK-LABEL: scalable_of_scalable_1:
; CHECK: // %bb.0:
; CHECK-NEXT: addvl x8, x0, #1
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: mov z0.d, x8
; CHECK-NEXT: ret
%idx = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 1, i32 0), <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i32> zeroinitializer
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -590,10 +590,10 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val,
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: ptrue p2.b
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
; CHECK-NEXT: addvl x8, x8, #2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: mov w9, w1
; CHECK-NEXT: cmp x9, x8
; CHECK-NEXT: csel x8, x9, x8, lo
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -120,9 +120,9 @@ define <vscale x 16 x i8> @insert_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec, <
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, #-16 // =0xfffffffffffffff0
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, #16 // =0x10
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, #16
; CHECK-NEXT: mov x10, sp
; CHECK-NEXT: cmp x8, #16
; CHECK-NEXT: csel x8, x8, x9, lo
6 changes: 4 additions & 2 deletions llvm/test/CodeGen/AArch64/sve-intrinsics-loads-nf.ll
@@ -17,7 +17,8 @@ define <vscale x 16 x i8> @ldnf1b(<vscale x 16 x i1> %pg, ptr %a) {
define <vscale x 16 x i8> @ldnf1b_out_of_lower_bound(<vscale x 16 x i1> %pg, ptr %a) {
; CHECK-LABEL: ldnf1b_out_of_lower_bound:
; CHECK: // %bb.0:
; CHECK-NEXT: addvl x8, x0, #-9
; CHECK-NEXT: rdvl x8, #-9
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x8]
; CHECK-NEXT: ret
%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 -9
@@ -62,7 +63,8 @@ define <vscale x 16 x i8> @ldnf1b_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
define <vscale x 16 x i8> @ldnf1b_out_of_upper_bound(<vscale x 16 x i1> %pg, ptr %a) {
; CHECK-LABEL: ldnf1b_out_of_upper_bound:
; CHECK: // %bb.0:
; CHECK-NEXT: addvl x8, x0, #8
; CHECK-NEXT: rdvl x8, #8
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: ldnf1b { z0.b }, p0/z, [x8]
; CHECK-NEXT: ret
%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %a, i64 8
@@ -9,9 +9,11 @@
define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: imm_out_of_range:
; CHECK: // %bb.0:
; CHECK-NEXT: addvl x8, x0, #8
; CHECK-NEXT: rdvl x8, #8
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: ld1d { z0.d }, p0/z, [x8]
; CHECK-NEXT: addvl x8, x0, #-9
; CHECK-NEXT: rdvl x8, #-9
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: st1d { z0.d }, p0, [x8]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
@@ -9,9 +9,11 @@
define void @imm_out_of_range(<vscale x 2 x i64> * %base, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: imm_out_of_range:
; CHECK: // %bb.0:
; CHECK-NEXT: addvl x8, x0, #8
; CHECK-NEXT: rdvl x8, #8
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: ldnt1d { z0.d }, p0/z, [x8]
; CHECK-NEXT: addvl x8, x0, #-9
; CHECK-NEXT: rdvl x8, #-9
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: stnt1d { z0.d }, p0, [x8]
; CHECK-NEXT: ret
%base_load = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base, i64 8
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
@@ -23,9 +23,9 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov w9, w0
; CHECK-NEXT: addvl x8, x8, #2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x9, x8
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: mov x9, sp
@@ -47,9 +47,9 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, w0
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x9, x8
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: mov x9, sp
@@ -141,9 +141,9 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, #128 // =0x80
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x8, #128
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: mov x9, sp
@@ -165,10 +165,10 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #1
; CHECK-NEXT: mov w9, #34464 // =0x86a0
; CHECK-NEXT: movk w9, #1, lsl #16
; CHECK-NEXT: addvl x8, x8, #1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: mov x9, sp
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
@@ -24,9 +24,9 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt,
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov x9, sp
; CHECK-NEXT: addvl x8, x8, #2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x1, x8
; CHECK-NEXT: csel x8, x1, x8, lo
; CHECK-NEXT: st1b { z1.b }, p0, [sp, #1, mul vl]
@@ -136,9 +136,9 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt)
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, #-1 // =0xffffffffffffffff
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: mov w9, #128 // =0x80
; CHECK-NEXT: addvl x8, x8, #2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x8, #128
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: mov x9, sp
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AArch64/sve-vl-arith.ll
@@ -116,7 +116,8 @@ define <vscale x 2 x i64> @decd_vec(<vscale x 2 x i64> %a) {
define i64 @incb_scalar_i64(i64 %a) {
; NO_SCALAR_INC-LABEL: incb_scalar_i64:
; NO_SCALAR_INC: // %bb.0:
; NO_SCALAR_INC-NEXT: addvl x0, x0, #1
; NO_SCALAR_INC-NEXT: rdvl x8, #1
; NO_SCALAR_INC-NEXT: add x0, x0, x8
; NO_SCALAR_INC-NEXT: ret
;
; CHECK-LABEL: incb_scalar_i64:
@@ -185,7 +186,8 @@ define i64 @incd_scalar_i64(i64 %a) {
define i64 @decb_scalar_i64(i64 %a) {
; NO_SCALAR_INC-LABEL: decb_scalar_i64:
; NO_SCALAR_INC: // %bb.0:
; NO_SCALAR_INC-NEXT: addvl x0, x0, #-2
; NO_SCALAR_INC-NEXT: rdvl x8, #-2
; NO_SCALAR_INC-NEXT: add x0, x0, x8
; NO_SCALAR_INC-NEXT: ret
;
; CHECK-LABEL: decb_scalar_i64:
@@ -257,9 +259,8 @@ define i64 @decd_scalar_i64(i64 %a) {
define i32 @incb_scalar_i32(i32 %a) {
; NO_SCALAR_INC-LABEL: incb_scalar_i32:
; NO_SCALAR_INC: // %bb.0:
; NO_SCALAR_INC-NEXT: // kill: def $w0 killed $w0 def $x0
; NO_SCALAR_INC-NEXT: addvl x0, x0, #3
; NO_SCALAR_INC-NEXT: // kill: def $w0 killed $w0 killed $x0
; NO_SCALAR_INC-NEXT: rdvl x8, #3
; NO_SCALAR_INC-NEXT: add w0, w0, w8
; NO_SCALAR_INC-NEXT: ret
;
; CHECK-LABEL: incb_scalar_i32:
@@ -344,9 +345,8 @@ define i32 @incd_scalar_i32(i32 %a) {
define i32 @decb_scalar_i32(i32 %a) {
; NO_SCALAR_INC-LABEL: decb_scalar_i32:
; NO_SCALAR_INC: // %bb.0:
; NO_SCALAR_INC-NEXT: // kill: def $w0 killed $w0 def $x0
; NO_SCALAR_INC-NEXT: addvl x0, x0, #-4
; NO_SCALAR_INC-NEXT: // kill: def $w0 killed $w0 killed $x0
; NO_SCALAR_INC-NEXT: rdvl x8, #-4
; NO_SCALAR_INC-NEXT: add w0, w0, w8
; NO_SCALAR_INC-NEXT: ret
;
; CHECK-LABEL: decb_scalar_i32: