Skip to content

Commit

Permalink
riscv: check cpu_support_riscv_zvfh() instead of cpu_support_riscv_v() && cpu_support_riscv_zfh() for fp16 weight/bias casting (original commit title garbled in page capture)
Browse files Browse the repository at this point in the history
  • Loading branch information
nihui committed Aug 25, 2024
1 parent d1ff67f commit cfd92c3
Show file tree
Hide file tree
Showing 5 changed files with 10 additions and 10 deletions.
4 changes: 2 additions & 2 deletions src/layer/riscv/convolution1d_riscv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -354,7 +354,7 @@ int Convolution1D_riscv::forward(const std::vector<Mat>& bottom_blobs, std::vect
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && weight_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && weight_data_flattened.elembits() == 16)
{
Mat weight_data_flattened_fp32;
cast_float16_to_float32(weight_data_flattened, weight_data_flattened_fp32, opt);
Expand All @@ -376,7 +376,7 @@ int Convolution1D_riscv::forward(const std::vector<Mat>& bottom_blobs, std::vect
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && bias_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && bias_data_flattened.elembits() == 16)
{
Mat bias_data_flattened_fp32;
cast_float16_to_float32(bias_data_flattened, bias_data_flattened_fp32, opt);
Expand Down
4 changes: 2 additions & 2 deletions src/layer/riscv/convolution_riscv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -639,7 +639,7 @@ int Convolution_riscv::forward(const std::vector<Mat>& bottom_blobs, std::vector
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && weight_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && weight_data_flattened.elembits() == 16)
{
Mat weight_data_flattened_fp32;
cast_float16_to_float32(weight_data_flattened, weight_data_flattened_fp32, opt);
Expand All @@ -661,7 +661,7 @@ int Convolution_riscv::forward(const std::vector<Mat>& bottom_blobs, std::vector
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && bias_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && bias_data_flattened.elembits() == 16)
{
Mat bias_data_flattened_fp32;
cast_float16_to_float32(bias_data_flattened, bias_data_flattened_fp32, opt);
Expand Down
4 changes: 2 additions & 2 deletions src/layer/riscv/convolutiondepthwise_riscv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -573,7 +573,7 @@ int ConvolutionDepthWise_riscv::forward(const std::vector<Mat>& bottom_blobs, st
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && weight_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && weight_data_flattened.elembits() == 16)
{
Mat weight_data_flattened_fp32;
cast_float16_to_float32(weight_data_flattened, weight_data_flattened_fp32, opt);
Expand All @@ -595,7 +595,7 @@ int ConvolutionDepthWise_riscv::forward(const std::vector<Mat>& bottom_blobs, st
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && bias_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && bias_data_flattened.elembits() == 16)
{
Mat bias_data_flattened_fp32;
cast_float16_to_float32(bias_data_flattened, bias_data_flattened_fp32, opt);
Expand Down
4 changes: 2 additions & 2 deletions src/layer/riscv/deconvolution_riscv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ int Deconvolution_riscv::forward(const std::vector<Mat>& bottom_blobs, std::vect
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && weight_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && weight_data_flattened.elembits() == 16)
{
Mat weight_data_flattened_fp32;
cast_float16_to_float32(weight_data_flattened, weight_data_flattened_fp32, opt);
Expand Down Expand Up @@ -388,7 +388,7 @@ int Deconvolution_riscv::forward(const std::vector<Mat>& bottom_blobs, std::vect
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && bias_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && bias_data_flattened.elembits() == 16)
{
Mat bias_data_flattened_fp32;
cast_float16_to_float32(bias_data_flattened, bias_data_flattened_fp32, opt);
Expand Down
4 changes: 2 additions & 2 deletions src/layer/riscv/deconvolutiondepthwise_riscv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -462,7 +462,7 @@ int DeconvolutionDepthWise_riscv::forward(const std::vector<Mat>& bottom_blobs,
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && weight_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && weight_data_flattened.elembits() == 16)
{
Mat weight_data_flattened_fp32;
cast_float16_to_float32(weight_data_flattened, weight_data_flattened_fp32, opt);
Expand Down Expand Up @@ -513,7 +513,7 @@ int DeconvolutionDepthWise_riscv::forward(const std::vector<Mat>& bottom_blobs,
return -100;

#if NCNN_RVV
if (opt.use_fp16_storage && cpu_support_riscv_v() && cpu_support_riscv_zfh() && bias_data_flattened.elembits() == 16)
if (opt.use_fp16_storage && cpu_support_riscv_zvfh() && bias_data_flattened.elembits() == 16)
{
Mat bias_data_flattened_fp32;
cast_float16_to_float32(bias_data_flattened, bias_data_flattened_fp32, opt);
Expand Down

0 comments on commit cfd92c3

Please sign in to comment.