From e54365006a46850e25bb2546c78a7e0ec88a544e Mon Sep 17 00:00:00 2001
From: Ricardo Jesus
Date: Fri, 15 Nov 2024 08:50:13 +0000
Subject: [PATCH] [AArch64][SVE] Detect MOV (imm, pred, zeroing/merging)
 (#116032)

Add patterns to fold MOV (scalar, predicated) to MOV (imm, pred,
merging) or MOV (imm, pred, zeroing) as appropriate.

This affects the `@llvm.aarch64.sve.dup` intrinsics, which currently
generate MOV (scalar, predicated) instructions even when the immediate
forms are possible. For example:

```
svuint8_t mov_z_b(svbool_t p) {
  return svdup_u8_z(p, 1);
}
```

Currently generates:

```
mov_z_b(__SVBool_t):
        mov     z0.b, #0
        mov     w8, #1
        mov     z0.b, p0/m, w8
        ret
```

Instead of:

```
mov_z_b(__SVBool_t):
        mov     z0.b, p0/z, #1
        ret
```
---
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  4 +-
 llvm/lib/Target/AArch64/SVEInstrFormats.td    | 22 ++++-
 llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll | 83 +++++++++++++++++++
 3 files changed, 105 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 4f146b3ee59e9a..14856be7ac364a 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -839,8 +839,8 @@ let Predicates = [HasSVEorSME] in {
   defm DUPM_ZI : sve_int_dup_mask_imm<"dupm">;
 
   // Splat immediate (predicated)
-  defm CPY_ZPmI  : sve_int_dup_imm_pred_merge<"cpy">;
-  defm CPY_ZPzI  : sve_int_dup_imm_pred_zero<"cpy">;
+  defm CPY_ZPmI  : sve_int_dup_imm_pred_merge<"cpy", AArch64dup_mt>;
+  defm CPY_ZPzI  : sve_int_dup_imm_pred_zero<"cpy", AArch64dup_mt>;
   defm FCPY_ZPmI : sve_int_dup_fpimm_pred<"fcpy">;
 
   // Splat scalar register (unpredicated, GPR or vector + element index)
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 6de6aed3b2a816..60705e2b6d4e7d 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5357,7 +5357,7 @@ multiclass sve_int_dup_imm_pred_merge_inst<
             (!cast<Instruction>(NAME) $Zd, $Pg, $imm, $shift)>;
 }
 
-multiclass sve_int_dup_imm_pred_merge<string asm> {
+multiclass sve_int_dup_imm_pred_merge<string asm, SDPatternOperator op> {
   defm _B : sve_int_dup_imm_pred_merge_inst<0b00, asm, ZPR8, cpy_imm8_opt_lsl_i8,
                                             nxv16i8, nxv16i1, i32, SVECpyDupImm8Pat>;
   defm _H : sve_int_dup_imm_pred_merge_inst<0b01, asm, ZPR16, cpy_imm8_opt_lsl_i16,
@@ -5386,6 +5386,15 @@ multiclass sve_int_dup_imm_pred_merge<string asm> {
             (!cast<Instruction>(NAME # _D) $Zd, $Pg, 0, 0)>;
   def : Pat<(vselect PPRAny:$Pg, (SVEDup0), (nxv2f64 ZPR:$Zd)),
             (!cast<Instruction>(NAME # _D) $Zd, $Pg, 0, 0)>;
+
+  def : Pat<(nxv16i8 (op nxv16i1:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), nxv16i8:$zd)),
+            (!cast<Instruction>(NAME # _B) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (op nxv8i1:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), nxv8i16:$zd)),
+            (!cast<Instruction>(NAME # _H) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (op nxv4i1:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), nxv4i32:$zd)),
+            (!cast<Instruction>(NAME # _S) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (op nxv2i1:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), nxv2i64:$zd)),
+            (!cast<Instruction>(NAME # _D) $zd, $pg, $a, $b)>;
 }
 
 multiclass sve_int_dup_imm_pred_zero_inst<
@@ -5407,7 +5416,7 @@ multiclass sve_int_dup_imm_pred_zero_inst<
             (!cast<Instruction>(NAME) $Pg, $imm, $shift)>;
 }
 
-multiclass sve_int_dup_imm_pred_zero<string asm> {
+multiclass sve_int_dup_imm_pred_zero<string asm, SDPatternOperator op> {
   defm _B : sve_int_dup_imm_pred_zero_inst<0b00, asm, ZPR8, cpy_imm8_opt_lsl_i8,
                                            nxv16i8, nxv16i1, i32, SVECpyDupImm8Pat>;
   defm _H : sve_int_dup_imm_pred_zero_inst<0b01, asm, ZPR16, cpy_imm8_opt_lsl_i16,
@@ -5416,6 +5425,15 @@ multiclass sve_int_dup_imm_pred_zero<string asm> {
                                            nxv4i32, nxv4i1, i32, SVECpyDupImm32Pat>;
   defm _D : sve_int_dup_imm_pred_zero_inst<0b11, asm, ZPR64, cpy_imm8_opt_lsl_i64,
                                            nxv2i64, nxv2i1, i64, SVECpyDupImm64Pat>;
+
+  def : Pat<(nxv16i8 (op nxv16i1:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _B) $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (op nxv8i1:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _H) $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (op nxv4i1:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _S) $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (op nxv2i1:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _D) $pg, $a, $b)>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
new file mode 100644
index 00000000000000..7f4ff927f7b65c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; Zeroing.
+
+define <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
+; CHECK-LABEL: mov_z_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 1)
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
+; CHECK-LABEL: mov_z_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 1)
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
+; CHECK-LABEL: mov_z_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i32 1)
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
+; CHECK-LABEL: mov_z_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 1)
+  ret <vscale x 2 x i64> %r
+}
+
+; Merging.
+
+define <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg) {
+; CHECK-LABEL: mov_m_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/m, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i8 1)
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg) {
+; CHECK-LABEL: mov_m_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, p0/m, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i16 1)
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg) {
+; CHECK-LABEL: mov_m_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, p0/m, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 1)
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @mov_m_d(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg) {
+; CHECK-LABEL: mov_m_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, p0/m, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg, i64 1)
+  ret <vscale x 2 x i64> %r
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
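
For reference, a minimal source-level sketch that exercises both forms from C. It is not part of the patch: the merging variant `mov_m_b` and the exact compiler invocation are illustrative assumptions; the expected `mov` lines mirror the checks in the test above.

```
// Sketch only: with this change, both functions should lower to a single
// predicated MOV (imm) rather than materialising the immediate in a GPR.
// Example invocation (assumed): clang -O2 --target=aarch64-linux-gnu -march=armv8-a+sve -S repro.c
#include <arm_sve.h>

svuint8_t mov_z_b(svbool_t p) {
  return svdup_u8_z(p, 1);      // expected: mov z0.b, p0/z, #1
}

svuint8_t mov_m_b(svuint8_t v, svbool_t p) {
  return svdup_u8_m(v, p, 1);   // expected: mov z0.b, p0/m, #1
}
```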