【PaddlePaddle Hackathon 4】No.56 : add fp16 test and bf16 for bernoulli and trunc #51657

Closed · wants to merge 28 commits · showing changes from 5 commits

Commits (28)
a1d0522  add fp16 and bf16 support for bernoulli (longranger2, Mar 14, 2023)
f6455e7  add fp16 and bf16 support for trunc (longranger2, Mar 14, 2023)
3279c68  Merge branch 'PaddlePaddle:develop' into fp16_56_2 (longranger2, Mar 22, 2023)
99f5854  fix bug (longranger2, Mar 22, 2023)
9ee7d3a  Merge branch 'develop' into fp16_56_2 (longranger2, Mar 25, 2023)
dce1754  fix bug (longranger2, Apr 3, 2023)
63c6f39  Merge branch 'PaddlePaddle:develop' into fp16_56_2 (longranger2, Apr 3, 2023)
b1771eb  fix bug (longranger2, Apr 22, 2023)
528e5b8  fix PR-CI-Codestyle-Check (longranger2, Apr 22, 2023)
2fc39e1  fix bug of trunc_kernel.cu (longranger2, Apr 22, 2023)
8b8361d  fix bug of trunc_kernel.cu (longranger2, Apr 22, 2023)
099d3bb  fix bug of trunc_kernel.cu (longranger2, Apr 22, 2023)
22dbf8d  fix bug of trunc and bernoulli (longranger2, May 3, 2023)
9db702f  fix bug (longranger2, May 9, 2023)
38d7bc1  fix bug (longranger2, May 9, 2023)
f4ce773  fix bug of MPType (longranger2, May 10, 2023)
bd62029  fix check_variable_and_dtype (longranger2, May 10, 2023)
3782bd1  fix bug of MPType (longranger2, May 10, 2023)
b20ac1a  fix bug of undefined T (longranger2, May 10, 2023)
7def562  fix bug (longranger2, May 11, 2023)
3f44c3d  Merge branch 'PaddlePaddle:develop' into fp16_56_2 (longranger2, May 12, 2023)
3e9063a  Update test_bernoulli_op.py (longranger2, May 12, 2023)
13a2c74  Update test_bernoulli_op.py (longranger2, May 15, 2023)
3c4e333  Update test_bernoulli_op.py (longranger2, May 15, 2023)
e7ad7f2  fix bug of import (longranger2, May 16, 2023)
10336f8  Merge branch 'PaddlePaddle:develop' into fp16_56_2 (longranger2, May 16, 2023)
f922dd8  remove the trunc (longranger2, May 31, 2023)
ea1d0ed  Merge branch 'PaddlePaddle:develop' into fp16_56_2 (longranger2, May 31, 2023)
10 changes: 8 additions & 2 deletions paddle/phi/kernels/gpu/bernoulli_kernel.cu
@@ -85,5 +85,11 @@ void BernoulliKernel(const Context& ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(
-    bernoulli, GPU, ALL_LAYOUT, phi::BernoulliKernel, float, double) {}
+PD_REGISTER_KERNEL(bernoulli,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::BernoulliKernel,
+                   float,
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
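With float16 and bfloat16 now registered (and, judging by the "fix bug of MPType" commits, the probability comparison presumably done in a float32 compute type internally), paddle.bernoulli becomes callable on half-precision GPU tensors. A quick smoke test, sketched assuming a CUDA build of Paddle that includes this change:

```python
import paddle

# Sketch only: assumes a CUDA build of Paddle with this registration applied.
paddle.set_device("gpu")
p = paddle.rand([2, 3]).astype("float16")  # per-element probabilities in [0, 1)
samples = paddle.bernoulli(p)              # draws 0.0 or 1.0 with probability p
print(samples.dtype)                       # paddle.float16, following the input dtype
```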
4 changes: 3 additions & 1 deletion paddle/phi/kernels/gpu/trunc_grad_kernel.cu
@@ -52,4 +52,6 @@ PD_REGISTER_KERNEL(trunc_grad,
                    float,
                    double,
                    int,
-                   int64_t) {}
+                   int64_t,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
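trunc is piecewise constant, so its gradient is zero almost everywhere; the grad kernel only has to produce zero-filled tensors in the new dtypes. A small dygraph check of that property, again sketched assuming a CUDA build with this change:

```python
import numpy as np
import paddle

# Sketch only: assumes a CUDA build of Paddle with this registration applied.
paddle.set_device("gpu")
x = paddle.to_tensor(
    (np.random.rand(3) * 10).astype(np.float16), stop_gradient=False
)
y = paddle.trunc(x)
y.sum().backward()
print(x.grad)  # all zeros: trunc is flat between integer boundaries
```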
12 changes: 10 additions & 2 deletions paddle/phi/kernels/gpu/trunc_kernel.cu
@@ -78,5 +78,13 @@ void TruncKernel(const Context& dev_ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(
-    trunc, GPU, ALL_LAYOUT, phi::TruncKernel, float, double, int, int64_t) {}
+PD_REGISTER_KERNEL(trunc,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::TruncKernel,
+                   float,
+                   double,
+                   int,
+                   int64_t,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
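The forward kernel drops the fractional part toward zero and preserves the input dtype. A minimal usage sketch under the same assumption:

```python
import paddle

# Sketch only: assumes a CUDA build of Paddle with this registration applied.
paddle.set_device("gpu")
x = (paddle.rand([2, 2]) * 10 - 5).astype("float16")
print(paddle.trunc(x))  # e.g. -3.7 -> -3.0, 2.9 -> 2.0; dtype stays float16
```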
12 changes: 11 additions & 1 deletion python/paddle/fluid/tests/unittests/test_bernoulli_op.py
@@ -31,10 +31,15 @@ def output_hist(out):
 class TestBernoulliOp(OpTest):
     def setUp(self):
         self.op_type = "bernoulli"
-        self.inputs = {"X": np.random.uniform(size=(1000, 784))}
+        self.inputs = {
+            "X": np.random.uniform(size=(1000, 784)).astype(self.dtype)
+        }
         self.attrs = {}
         self.outputs = {"Out": np.zeros((1000, 784)).astype("float32")}

Review comment (Contributor): The output for the float16 case shouldn't be float32, should it?
Reply (longranger2, Contributor Author): OK 👌

+    def init_dtype(self):
+        self.dtype = np.float32
 
     def test_check_output(self):
         self.check_output_customized(self.verify_output)
 
@@ -98,5 +103,10 @@ def test_fixed_random_number(self):
     paddle.enable_static()
 
 
+class TestBernoulliFP16OP(TestBernoulliOp):

Review comment (Contributor): Shouldn't a BF16 unit test be added as well?
Reply (longranger2, Contributor Author): Right, it's already been added~

+    def init_dtype(self):
+        self.dtype = np.float16
 
 
 if __name__ == "__main__":
     unittest.main()
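Since this view shows changes from only 5 of the 28 commits, the fixes promised in the two review threads above are not visible here. A sketch of how both points (a dtype-consistent output placeholder and a BF16 case) could look, following the conventions of test_trunc_op.py below; the class name and the check_output_with_place_customized call are assumptions, not necessarily the code that was finally merged:

```python
import unittest

import numpy as np
import paddle.fluid.core as core
from eager_op_test import convert_float_to_uint16


@unittest.skipIf(
    not core.is_compiled_with_cuda()
    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
    "core is not compiled with CUDA or does not support bfloat16",
)
class TestBernoulliBF16OP(TestBernoulliOp):  # hypothetical class name
    def init_dtype(self):
        self.dtype = np.uint16  # OpTest carries bf16 values as uint16 bit patterns

    def setUp(self):
        self.op_type = "bernoulli"
        self.init_dtype()
        x = np.random.uniform(size=(1000, 784)).astype(np.float32)
        self.inputs = {"X": convert_float_to_uint16(x)}
        self.attrs = {}
        # Per the first review thread: the placeholder output follows the
        # test dtype instead of being hard-coded to float32.
        self.outputs = {
            "Out": convert_float_to_uint16(
                np.zeros((1000, 784), dtype=np.float32)
            )
        }

    def test_check_output(self):
        # Assumption: this OpTest version provides
        # check_output_with_place_customized to run verify_output on a place.
        place = core.CUDAPlace(0)
        self.check_output_with_place_customized(self.verify_output, place)
```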
36 changes: 35 additions & 1 deletion python/paddle/fluid/tests/unittests/test_trunc_op.py
@@ -15,9 +15,10 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 
 import paddle
+import paddle.fluid.core as core
 
 paddle.enable_static()
 
@@ -90,5 +91,38 @@ def test_errors(self):
         self.assertRaises(TypeError, paddle.trunc, x)
 
 
+class TestTruncFP16OP(TestTruncOp):
+    def init_dtype_type(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestTruncBF16OP(OpTest):
+    def setUp(self):
+        self.op_type = "trunc"
+        self.python_api = paddle.trunc
+        self.init_dtype_type()
+        np.random.seed(2021)
+        x = np.random.random((20, 20)).astype(np.float64)
+        out = np.trunc(x)
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': convert_float_to_uint16(out)}
+
+    def init_dtype_type(self):
+        self.dtype = np.uint16
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)
+
+    def test_check_grad(self):
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out')
+
+
 if __name__ == "__main__":
     unittest.main()
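A note on representation: bfloat16 is the upper 16 bits of a float32, which is why these tests carry bf16 data as uint16 via convert_float_to_uint16. A standalone NumPy illustration of that round trip (Paddle's helper may handle rounding differently):

```python
import numpy as np


def float_to_uint16(a: np.ndarray) -> np.ndarray:
    # Keep the high 16 bits of each float32; that bit pattern is bfloat16.
    return (a.astype(np.float32).view(np.uint32) >> 16).astype(np.uint16)


x = np.array([1.75, 3.14159], dtype=np.float32)
bf16 = float_to_uint16(x)
restored = (bf16.astype(np.uint32) << 16).view(np.float32)
print(restored)  # [1.75, 3.140625]: bf16 keeps roughly 2-3 decimal digits
```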