[RISCV] default enable splitting regalloc between RVV and other #72950

Merged
merged 1 commit on Dec 1, 2023
Changes from all commits
llvm/lib/Target/RISCV/RISCVTargetMachine.cpp (2 changes: 1 addition & 1 deletion)

@@ -93,7 +93,7 @@ static cl::opt<bool>
 static cl::opt<bool>
     EnableSplitRegAlloc("riscv-split-regalloc", cl::Hidden,
                         cl::desc("Enable Split RegisterAlloc for RVV"),
-                        cl::init(false));
+                        cl::init(true));

 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
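With the default flipped, RISC-V register allocation now runs in two phases (visible in the O3 pipeline test below): a first greedy pass restricted to RVV vector registers, then the usual allocation for all remaining register classes. Since the option stays a plain cl::opt, the old single-pass behaviour is still reachable with -riscv-split-regalloc=0. A rough C++ sketch of the kind of TargetPassConfig override behind this; the helper name onlyAllocateRVVReg is an illustrative assumption, not quoted from this patch:

// Rough sketch (assumed shape, not the exact code in the tree): run the
// greedy allocator twice, first over RVV vector registers only, then let
// the default path allocate everything else.
bool RISCVPassConfig::addRegAssignAndRewriteOptimized() {
  if (EnableSplitRegAlloc) {
    // Phase 1: allocate only RVV registers; onlyAllocateRVVReg stands in
    // for a RegClassFilterFunc that accepts just the vector register classes.
    addPass(createGreedyRegisterAllocator(onlyAllocateRVVReg));
    // Rewrite the RVV virtual registers but keep the rest virtual so the
    // second phase can still allocate them.
    addPass(createVirtRegRewriter(/*ClearVirtRegs=*/false));
  }
  // Phase 2: default greedy allocation and rewriting for the remaining
  // register classes (this is the second allocator shown in O3-pipeline.ll).
  return TargetPassConfig::addRegAssignAndRewriteOptimized();
}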

llvm/test/CodeGen/RISCV/O0-pipeline.ll (1 change: 1 addition & 0 deletions)

@@ -47,6 +47,7 @@
 ; CHECK-NEXT: Eliminate PHI nodes for register allocation
 ; CHECK-NEXT: Two-Address instruction pass
 ; CHECK-NEXT: Fast Register Allocator
+; CHECK-NEXT: Fast Register Allocator
 ; CHECK-NEXT: Remove Redundant DEBUG_VALUE analysis
 ; CHECK-NEXT: Fixup Statepoint Caller Saved
 ; CHECK-NEXT: Lazy Machine Block Frequency Analysis

llvm/test/CodeGen/RISCV/O3-pipeline.ll (4 changes: 4 additions & 0 deletions)

@@ -142,6 +142,10 @@
 ; CHECK-NEXT: Machine Optimization Remark Emitter
 ; CHECK-NEXT: Greedy Register Allocator
 ; CHECK-NEXT: Virtual Register Rewriter
+; CHECK-NEXT: Virtual Register Map
+; CHECK-NEXT: Live Register Matrix
+; CHECK-NEXT: Greedy Register Allocator
+; CHECK-NEXT: Virtual Register Rewriter
 ; CHECK-NEXT: Register Allocation Pass Scoring
 ; CHECK-NEXT: Stack Slot Coloring
 ; CHECK-NEXT: Machine Copy Propagation Pass

llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll (32 changes: 16 additions & 16 deletions)

@@ -3062,24 +3062,24 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
 ; CHECK-NEXT: add a1, sp, a1
 ; CHECK-NEXT: addi a1, a1, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 1
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a1, a2, 1
 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: slli a1, a1, 2
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: vslidedown.vx v0, v0, a1
+; CHECK-NEXT: slli a2, a2, 2
+; CHECK-NEXT: sub a1, a0, a2
+; CHECK-NEXT: sltu a3, a0, a1
 ; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT: and a1, a3, a1
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT: vsrl.vi v8, v16, 8, v0.t
 ; CHECK-NEXT: vsll.vi v16, v16, 8, v0.t
 ; CHECK-NEXT: vor.vv v16, v16, v8, v0.t
 ; CHECK-NEXT: vsrl.vi v8, v16, 4, v0.t
-; CHECK-NEXT: lui a2, 1
-; CHECK-NEXT: addi a2, a2, -241
-; CHECK-NEXT: vand.vx v8, v8, a2, v0.t
-; CHECK-NEXT: vand.vx v16, v16, a2, v0.t
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: addi a1, a1, -241
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
 ; CHECK-NEXT: vsll.vi v16, v16, 4, v0.t
 ; CHECK-NEXT: vor.vv v16, v8, v16, v0.t
 ; CHECK-NEXT: vsrl.vi v8, v16, 2, v0.t
@@ -3098,9 +3098,9 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: addi a5, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: bltu a0, a1, .LBB46_2
+; CHECK-NEXT: bltu a0, a2, .LBB46_2
 ; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: mv a0, a2
 ; CHECK-NEXT: .LBB46_2:
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
@@ -3113,8 +3113,8 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
 ; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
 ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t
-; CHECK-NEXT: vand.vx v16, v16, a2, v0.t
-; CHECK-NEXT: vand.vx v8, v8, a2, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
 ; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t
 ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
 ; CHECK-NEXT: vsrl.vi v16, v8, 2, v0.t

llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll (88 changes: 44 additions & 44 deletions)

@@ -976,24 +976,24 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: add a1, sp, a1
 ; CHECK-NEXT: addi a1, a1, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: add a5, a0, a3
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a1, a3, 3
+; CHECK-NEXT: add a5, a0, a1
 ; CHECK-NEXT: vl8re64.v v8, (a5)
 ; CHECK-NEXT: csrr a5, vlenb
 ; CHECK-NEXT: slli a5, a5, 3
 ; CHECK-NEXT: add a5, sp, a5
 ; CHECK-NEXT: addi a5, a5, 16
 ; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: srli a5, a1, 3
+; CHECK-NEXT: srli a5, a3, 3
 ; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vslidedown.vx v0, v0, a5
-; CHECK-NEXT: add a5, a2, a3
-; CHECK-NEXT: sub a3, a4, a1
-; CHECK-NEXT: sltu a6, a4, a3
+; CHECK-NEXT: add a5, a2, a1
+; CHECK-NEXT: sub a1, a4, a3
+; CHECK-NEXT: sltu a6, a4, a1
 ; CHECK-NEXT: addi a6, a6, -1
-; CHECK-NEXT: and a6, a6, a3
-; CHECK-NEXT: li a3, 63
+; CHECK-NEXT: and a6, a6, a1
+; CHECK-NEXT: li a1, 63
 ; CHECK-NEXT: vl8re64.v v8, (a5)
 ; CHECK-NEXT: csrr a5, vlenb
 ; CHECK-NEXT: li a7, 40
@@ -1021,7 +1021,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vand.vx v8, v8, a3, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: csrr a0, vlenb
@@ -1044,7 +1044,7 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v8, a3, v0.t
+; CHECK-NEXT: vand.vx v16, v8, a1, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: li a2, 24
 ; CHECK-NEXT: mul a0, a0, a2
@@ -1065,22 +1065,22 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: bltu a4, a1, .LBB46_2
+; CHECK-NEXT: bltu a4, a3, .LBB46_2
 ; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a4, a1
+; CHECK-NEXT: mv a4, a3
 ; CHECK-NEXT: .LBB46_2:
 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 48
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a0, a0, a2
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vand.vx v8, v8, a3, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -1090,26 +1090,26 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 48
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a0, a0, a2
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vnot.v v16, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v16, a3, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 5
 ; CHECK-NEXT: add a0, sp, a0
@@ -1162,24 +1162,24 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: add a1, sp, a1
 ; CHECK-NEXT: addi a1, a1, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a3, a1, 3
-; CHECK-NEXT: add a5, a0, a3
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a1, a3, 3
+; CHECK-NEXT: add a5, a0, a1
 ; CHECK-NEXT: vl8re64.v v8, (a5)
 ; CHECK-NEXT: csrr a5, vlenb
 ; CHECK-NEXT: slli a5, a5, 4
 ; CHECK-NEXT: add a5, sp, a5
 ; CHECK-NEXT: addi a5, a5, 16
 ; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: srli a5, a1, 3
+; CHECK-NEXT: srli a5, a3, 3
 ; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vslidedown.vx v0, v0, a5
-; CHECK-NEXT: add a5, a2, a3
-; CHECK-NEXT: sub a3, a4, a1
-; CHECK-NEXT: sltu a6, a4, a3
+; CHECK-NEXT: add a5, a2, a1
+; CHECK-NEXT: sub a1, a4, a3
+; CHECK-NEXT: sltu a6, a4, a1
 ; CHECK-NEXT: addi a6, a6, -1
-; CHECK-NEXT: and a6, a6, a3
-; CHECK-NEXT: li a3, 63
+; CHECK-NEXT: and a6, a6, a1
+; CHECK-NEXT: li a1, 63
 ; CHECK-NEXT: vl8re64.v v8, (a5)
 ; CHECK-NEXT: csrr a5, vlenb
 ; CHECK-NEXT: li a7, 40
@@ -1208,7 +1208,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vand.vx v8, v8, a3, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: csrr a0, vlenb
@@ -1231,7 +1231,7 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vnot.v v8, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v8, a3, v0.t
+; CHECK-NEXT: vand.vx v16, v8, a1, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add a0, sp, a0
@@ -1251,19 +1251,19 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: bltu a4, a1, .LBB47_2
+; CHECK-NEXT: bltu a4, a3, .LBB47_2
 ; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a4, a1
+; CHECK-NEXT: mv a4, a3
 ; CHECK-NEXT: .LBB47_2:
 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v24
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 48
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a0, a0, a2
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vand.vx v8, v8, a3, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: slli a0, a0, 4
 ; CHECK-NEXT: add a0, sp, a0
@@ -1286,13 +1286,13 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 48
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: li a2, 48
+; CHECK-NEXT: mul a0, a0, a2
 ; CHECK-NEXT: add a0, sp, a0
 ; CHECK-NEXT: addi a0, a0, 16
 ; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT: vnot.v v16, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v16, a3, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
 ; CHECK-NEXT: csrr a0, vlenb
 ; CHECK-NEXT: li a1, 24
 ; CHECK-NEXT: mul a0, a0, a1

llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll (1 change: 0 additions & 1 deletion)

@@ -9,7 +9,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x
 define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT: vmv4r.v v12, v8
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t

llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll (8 changes: 4 additions & 4 deletions)

@@ -3512,7 +3512,7 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: csrr a3, vlenb
 ; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul t0, a3, a1
+; CHECK-NEXT: mul t2, a3, a1
 ; CHECK-NEXT: slli t1, a3, 3
 ; CHECK-NEXT: srli a4, a3, 2
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
@@ -3521,17 +3521,17 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
 ; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: add a5, a2, t1
 ; CHECK-NEXT: vl8re64.v v8, (a5)
-; CHECK-NEXT: slli t3, a3, 4
+; CHECK-NEXT: slli t0, a3, 4
 ; CHECK-NEXT: slli a5, a3, 1
 ; CHECK-NEXT: vslidedown.vx v0, v0, a1
 ; CHECK-NEXT: mv a7, a6
 ; CHECK-NEXT: bltu a6, a5, .LBB171_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a7, a5
 ; CHECK-NEXT: .LBB171_2:
-; CHECK-NEXT: add t2, a2, t0
+; CHECK-NEXT: add t2, a2, t2
 ; CHECK-NEXT: add t1, a0, t1
-; CHECK-NEXT: add t0, a2, t3
+; CHECK-NEXT: add t0, a2, t0
 ; CHECK-NEXT: vl8re64.v v16, (a2)
 ; CHECK-NEXT: csrr a2, vlenb
 ; CHECK-NEXT: slli a2, a2, 4