[LLVM+Clang][XTHeadVector] Implement intrinsics for vsmul (llvm#82)
* [LLVM][XTHeadVector] Change vsmul test cases

Make the tests match the RVV vsmul tests more closely
Change the intrinsic interface
Add the csrwi vxrm instruction to the expected test output

* [LLVM][XTHeadVector] Redefine vsmul

* [Clang][XTHeadVector] Add test cases for vsmul

* [Clang][XTHeadVector] Define vsmul
zhanyi22333 authored and RevySR committed Apr 3, 2024
1 parent cbe0563 commit c47148b
Showing 7 changed files with 2,597 additions and 1,121 deletions.
clang/include/clang/Basic/riscv_vector_xtheadv.td: 3 additions & 0 deletions
@@ -1151,6 +1151,9 @@ let ManualCodegen = [{
// 13.2. Vector Single-Width Averaging Add and Subtract
defm th_vaadd : RVVSignedBinBuiltinSetRoundingMode;
defm th_vasub : RVVSignedBinBuiltinSetRoundingMode;
// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation Operations
defm th_vsmul : RVVSignedBinBuiltinSetRoundingMode;
}


include "riscv_vector_xtheadv_wrappers.td"
clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td: 74 additions & 0 deletions
@@ -1843,3 +1843,77 @@ let HeaderCode =

}] in
def th_single_width_averaging_add_and_subtract_wrapper_macros: RVVHeader;

// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation

let HeaderCode =
[{

#define __riscv_vsmul_vv_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m1(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m2(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m4(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4(op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8(op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8(op1, op2, rm, vl)

#define __riscv_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl)
#define __riscv_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl)

}] in
def th_single_width_fractional_multiply_with_rounding_and_saturation_wrapper_macros: RVVHeader;
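
With these wrapper macros in place, code written against the standard __riscv_vsmul_* names resolves to the th_ intrinsics. A hedged sketch of a masked vector-scalar call; the vbool8_t mask type for i16m2 and the __RISCV_VXRM_RDN constant follow the upstream RVV intrinsics and are assumed to apply here, and masked-off element handling is left to the underlying th_ intrinsic.

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

/* Masked vector-scalar fractional multiply with round-down (truncate). */
vint16m2_t smul_scalar_masked(vbool8_t mask, vint16m2_t a, int16_t s, size_t vl) {
  return __riscv_vsmul_vx_i16m2_m(mask, a, s, __RISCV_VXRM_RDN, vl);
}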