diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py
index 23ad1a821c43c..3ace9a72f769b 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py
@@ -48,8 +48,8 @@ def forward(
         var_9 = var_8.reshape((0, 16, 49))
         var_10 = paddle.tensor.linalg.transpose(var_1, perm=[0, 1, 3, 2])
         var_11 = paddle.tensor.linalg.matmul(var_5, var_10)
-        var_12 = var_11.__mul__(0.25)
-        var_13 = var_12.__add__(var_9)
+        var_12 = var_11 * 0.25
+        var_13 = var_12 + var_9
         var_14 = paddle.nn.functional.activation.softmax(var_13)
         var_15 = paddle.tensor.linalg.matmul(var_14, var_2)
         var_16 = paddle.tensor.linalg.transpose(var_15, perm=[0, 2, 1, 3])
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
new file mode 100644
index 0000000000000..c335a9df27a8a
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^DPN^DPN98
+# api:paddle.tensor.manipulation.split||api:paddle.tensor.math.add||api:paddle.tensor.manipulation.concat
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [22, 1056, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_1,  # (shape: [22, 1024, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_2,  # (shape: [22, 288, 14, 14], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_3, var_4 = paddle.tensor.manipulation.split(
+            var_0, num_or_sections=[1024, 32], axis=1
+        )
+        var_5 = paddle.tensor.math.add(x=var_1, y=var_3)
+        var_6 = paddle.tensor.manipulation.concat([var_2, var_4], axis=1)
+        return var_5, var_6
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[22, 1056, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[22, 1024, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[22, 288, 14, 14], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
new file mode 100644
index 0000000000000..5dc014a873d05
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^ResNeSt^ResNeSt50
+# api:paddle.tensor.manipulation.reshape||api:paddle.tensor.linalg.transpose||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.manipulation.reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [22, 1024, 1, 1], dtype: paddle.float32, stop_gradient: True)
+    ):
+        var_1 = paddle.tensor.manipulation.reshape(
+            x=var_0, shape=[22, 1, 2, 512]
+        )
+        var_2 = paddle.tensor.linalg.transpose(x=var_1, perm=[0, 2, 1, 3])
+        var_3 = paddle.nn.functional.activation.softmax(var_2, axis=1)
+        var_4 = paddle.tensor.manipulation.reshape(
+            x=var_3, shape=[22, 1024, 1, 1]
+        )
+        return var_4
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[22, 1024, 1, 1], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
new file mode 100644
index 0000000000000..95b55b9bfe331
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^RedNet^RedNet38
+# api:paddle.nn.functional.conv._conv_nd||method:reshape||method:unsqueeze||api:paddle.nn.functional.common.unfold||method:reshape||method:__mul__||method:sum||method:reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[784],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[784, 64, 1, 1],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [10, 64, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_1,  # (shape: [10, 256, 14, 14], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_2 = paddle.nn.functional.conv._conv_nd(
+            var_0,
+            self.parameter_1,
+            bias=self.parameter_0,
+            stride=[1, 1],
+            padding=[0, 0],
+            padding_algorithm='EXPLICIT',
+            dilation=[1, 1],
+            groups=1,
+            data_format='NCHW',
+            channel_dim=1,
+            op_type='conv2d',
+            use_cudnn=True,
+        )
+        var_3 = var_2.reshape((10, 16, 49, 14, 14))
+        var_4 = var_3.unsqueeze(2)
+        var_5 = paddle.nn.functional.common.unfold(var_1, 7, 1, 3, 1)
+        var_6 = var_5.reshape((10, 16, 16, 49, 14, 14))
+        var_7 = var_4 * var_6
+        var_8 = var_7.sum(axis=3)
+        var_9 = var_8.reshape((10, 256, 14, 14))
+        return var_9
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[10, 64, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[10, 256, 14, 14], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_34.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_34.py
new file mode 100644
index 0000000000000..c4ce15aed780a
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_34.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^HRNet^HRNet_W32_C
+# api:paddle.nn.functional.common.upsample||api:paddle.tensor.math.add
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [10, 32, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_1,  # (shape: [10, 32, 56, 56], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_2 = paddle.nn.functional.common.upsample(
+            var_0, scale_factor=4, mode='nearest'
+        )
+        var_3 = paddle.tensor.math.add(x=var_1, y=var_2)
+        return var_2, var_3
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[10, 32, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[10, 32, 56, 56], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=True
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
new file mode 100644
index 0000000000000..78311b8c6a05e
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.nn.functional.conv._conv_nd||method:flatten||method:transpose||api:paddle.nn.functional.norm.layer_norm
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+        self.parameter_2 = self.create_parameter(
+            shape=[96, 3, 7, 7],
+            dtype=paddle.float32,
+        )
+        self.parameter_3 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [4, 3, 384, 384], dtype: paddle.float32, stop_gradient: True)
+    ):
+        var_1 = paddle.nn.functional.conv._conv_nd(
+            var_0,
+            self.parameter_2,
+            bias=self.parameter_3,
+            stride=[4, 4],
+            padding=[2, 2],
+            padding_algorithm='EXPLICIT',
+            dilation=[1, 1],
+            groups=1,
+            data_format='NCHW',
+            channel_dim=1,
+            op_type='conv2d',
+            use_cudnn=True,
+        )
+        var_2 = var_1.flatten(start_axis=2, stop_axis=-1)
+        var_3 = var_2.transpose([0, 2, 1])
+        var_4 = paddle.nn.functional.norm.layer_norm(
+            var_3,
+            normalized_shape=[96],
+            weight=self.parameter_1,
+            bias=self.parameter_0,
+            epsilon=1e-05,
+        )
+        return var_4
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[4, 3, 384, 384], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=False, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
new file mode 100644
index 0000000000000..10e7eacac4c14
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||method:chunk
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[288],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[96, 288],
+            dtype=paddle.float32,
+        )
+        self.parameter_2 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+        self.parameter_3 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [6, 9216, 96], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = paddle.nn.functional.norm.layer_norm(
+            var_0,
+            normalized_shape=[96],
+            weight=self.parameter_2,
+            bias=self.parameter_3,
+            epsilon=1e-05,
+        )
+        var_2 = paddle.nn.functional.common.linear(
+            x=var_1, weight=self.parameter_1, bias=self.parameter_0, name=None
+        )
+        var_3, var_4, var_5 = var_2.chunk(3, axis=-1)
+        return var_3, var_4, var_5
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (paddle.rand(shape=[6, 9216, 96], dtype=paddle.float32),)
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=False, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
new file mode 100644
index 0000000000000..6b6858d673bb6
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.nn.functional.norm.layer_norm||api:paddle.tensor.stat.mean
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[768],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[768],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [4, 144, 768], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = paddle.nn.functional.norm.layer_norm(
+            var_0,
+            normalized_shape=[768],
+            weight=self.parameter_0,
+            bias=self.parameter_1,
+            epsilon=1e-05,
+        )
+        var_2 = paddle.tensor.stat.mean(var_1, axis=1)
+        return var_2
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (paddle.rand(shape=[4, 144, 768], dtype=paddle.float32),)
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=False, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_38.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_38.py
new file mode 100644
index 0000000000000..c9b53869aa8cb
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_38.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# method:reshape||method:transpose||method:reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [4, 48, 96, 96], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = var_0.reshape([4, 48, 1, 96, 96, 1])
+        var_2 = var_1.transpose([0, 2, 4, 3, 5, 1])
+        var_3 = var_2.reshape([-1, 96, 48])
+        return var_3
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[4, 48, 96, 96], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=True
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
new file mode 100644
index 0000000000000..c2cfa2786670d
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.tensor.creation.to_tensor||method:reshape||method:transpose||method:reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [12, 288, 192], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = paddle.tensor.creation.to_tensor(6, 'int32')
+        var_2 = var_0.reshape([var_1, 2, 1, 12, 24, 192])
+        var_3 = var_2.transpose([0, 1, 3, 2, 4, 5])
+        var_4 = var_3.reshape([var_1, 24, 24, 192])
+        return var_4
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (paddle.rand(shape=[12, 288, 192], dtype=paddle.float32),)
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py
index 8341736299761..da5d0491b84b6 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py
@@ -51,8 +51,8 @@ def forward(
         var_12 = paddle.tensor.linalg.transpose(var_11, (1, 0))
         var_13 = var_12.reshape((0, 49, 49))
         var_14 = paddle.tensor.linalg.matmul(var_6, var_9)
-        var_15 = var_14.__mul__(0.25)
-        var_16 = var_15.__add__(var_13)
+        var_15 = var_14 * 0.25
+        var_16 = var_15 + var_13
         var_17 = paddle.nn.functional.activation.softmax(var_16)
         var_18 = paddle.tensor.linalg.matmul(var_17, var_8)
         var_19 = paddle.tensor.linalg.transpose(var_18, perm=[0, 2, 1, 3])
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py
index 073175ced160b..abd5282e9d1b7 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py
@@ -48,8 +48,8 @@ def forward(
         var_9 = var_8.reshape((0, 49, 196))
         var_10 = paddle.tensor.linalg.transpose(var_1, perm=[0, 1, 3, 2])
         var_11 = paddle.tensor.linalg.matmul(var_5, var_10)
-        var_12 = var_11.__mul__(0.25)
-        var_13 = var_12.__add__(var_9)
+        var_12 = var_11 * 0.25
+        var_13 = var_12 + var_9
         var_14 = paddle.nn.functional.activation.softmax(var_13)
         var_15 = paddle.tensor.linalg.matmul(var_14, var_2)
         var_16 = paddle.tensor.linalg.transpose(var_15, perm=[0, 2, 1, 3])
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py
new file mode 100644
index 0000000000000..6b9d3e3c94557
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# method:chunk
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class ChunkCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [10, 2304, 192], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1, var_2 = var_0.chunk(2, axis=-1)
+        return var_1, var_2
+
+
+class TestChunk(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[10, 2304, 192], dtype=paddle.float32),
+        )
+        self.net = ChunkCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
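For context on the recurring hunks in test_sub_graph_3.py, test_sub_graph_7.py, and test_sub_graph_9.py: on paddle.Tensor the infix operators dispatch to the same magic methods, so var_11 * 0.25 and var_11.__mul__(0.25) compute identical values, and the rewrite is purely stylistic. A minimal sketch of the equivalence, using an illustrative tensor rather than one from the tests:

import numpy as np

import paddle

x = paddle.rand(shape=[2, 3], dtype=paddle.float32)

# The infix forms are sugar for the dunder calls, so each pair below
# produces identical results.
np.testing.assert_allclose(x.__mul__(0.25).numpy(), (x * 0.25).numpy())
np.testing.assert_allclose(x.__add__(1.0).numpy(), (x + 1.0).numpy())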
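All of the new test files share one comparison harness: the captured subgraph is compiled with paddle.jit.to_static, once as a baseline and once with the prim flag (and, where supported, the CINN build pass) enabled, and the flattened outputs are compared elementwise. A distilled sketch of that pattern, where TinyNet is a hypothetical stand-in rather than one of the captured subgraphs:

import numpy as np

import paddle


class TinyNet(paddle.nn.Layer):
    # Hypothetical stand-in for a captured subgraph.
    def forward(self, x):
        return x * 0.25 + 1.0


def run(net, inputs, with_prim=False, with_cinn=False):
    # Same recipe as the train() helpers above: toggle the prim flag,
    # optionally enable the CINN pass, then compile to a static graph.
    paddle.set_flags({'FLAGS_prim_all': with_prim})
    if with_cinn:
        build_strategy = paddle.static.BuildStrategy()
        build_strategy.build_cinn_pass = True
        net = paddle.jit.to_static(
            net, build_strategy=build_strategy, full_graph=True
        )
    else:
        net = paddle.jit.to_static(net, full_graph=True)
    paddle.seed(123)
    return net(*inputs)


inputs = (paddle.rand(shape=[4, 8], dtype=paddle.float32),)
baseline = run(TinyNet(), inputs)
candidate = run(TinyNet(), inputs, with_prim=True)
for st, prim in zip(
    paddle.utils.flatten(baseline), paddle.utils.flatten(candidate)
):
    np.testing.assert_allclose(st.numpy(), prim.numpy(), atol=1e-8)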