diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc
index e379832593c78..2df1f291f9f8c 100644
--- a/paddle/fluid/platform/cpu_info.cc
+++ b/paddle/fluid/platform/cpu_info.cc
@@ -164,6 +164,16 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
     // AVX512F: EBX Bit 16
     int avx512f_mask = (1 << 16);
     return (reg[1] & avx512f_mask) != 0;
+  } else if (cpu_isa == avx512_core) {
+    // avx512_core requires AVX512F + AVX512DQ + AVX512BW + AVX512VL
+    // (CPUID leaf 7, EBX bits 16, 17, 30, 31). Masks are unsigned so
+    // that (1U << 31) does not shift into the sign bit (UB pre-C++20).
+    unsigned int avx512f_mask = (1U << 16);
+    unsigned int avx512dq_mask = (1U << 17);
+    unsigned int avx512bw_mask = (1U << 30);
+    unsigned int avx512vl_mask = (1U << 31);
+    return ((reg[1] & avx512f_mask) && (reg[1] & avx512dq_mask) &&
+            (reg[1] & avx512bw_mask) && (reg[1] & avx512vl_mask));
   }
 }
 #endif
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index fc5b077466794..a710c2d6003e2 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -142,6 +142,16 @@ bool IsCompiledWithMKLDNN() {
 #endif
 }
 
+// True iff Paddle was built with MKL-DNN support and the host CPU
+// provides the avx512_core ISA required by the bfloat16 kernels.
+bool SupportsBfloat16() {
+#ifndef PADDLE_WITH_MKLDNN
+  return false;
+#else
+  return platform::MayIUse(platform::cpu_isa_t::avx512_core);
+#endif
+}
+
 bool IsCompiledWithBrpc() {
 #ifndef PADDLE_WITH_DISTRIBUTE
   return false;
@@ -1661,6 +1671,7 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("is_compiled_with_cuda", IsCompiledWithCUDA);
   m.def("is_compiled_with_xpu", IsCompiledWithXPU);
   m.def("is_compiled_with_mkldnn", IsCompiledWithMKLDNN);
+  m.def("supports_bfloat16", SupportsBfloat16);
   m.def("is_compiled_with_brpc", IsCompiledWithBrpc);
   m.def("is_compiled_with_dist", IsCompiledWithDIST);
   m.def("_cuda_synchronize", [](const platform::CUDAPlace &place) {
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
index 0ac33383fb26b..4b7b4b5811a67 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
@@ -19,7 +19,7 @@ import struct
 
 import paddle.fluid.core as core
-from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
+from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
 from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
 
 
@@ -205,4 +205,5 @@ def init_group(self):
 
 
 if __name__ == '__main__':
-    unittest.main()
+    if core.supports_bfloat16():
+        unittest.main()