From 04673bf51cde5f21960d0a3f7c74f669f008d322 Mon Sep 17 00:00:00 2001
From: feifei-111 <2364819892@qq.com>
Date: Mon, 29 Jan 2024 06:21:05 +0000
Subject: [PATCH 1/3] update

---
 .../pir/cinn/sub_graphs/test_sub_graph_31.py |  81 +++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_32.py |  80 +++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_33.py | 103 ++++++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_34.py |  76 ++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_35.py | 111 ++++++++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_36.py |  97 +++++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_37.py |  86 ++++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_38.py |  73 ++++++++++++
 .../pir/cinn/sub_graphs/test_sub_graph_39.py |  74 ++++++++++++
 .../cinn/sub_graphs/test_sub_graph_chunk.py  |  73 ++++++++++++
 10 files changed, 854 insertions(+)
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_34.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_38.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
 create mode 100644 test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py

diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
new file mode 100644
index 0000000000000..095b001dd76b8
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^DPN^DPN98
+# api:paddle.tensor.manipulation.split||api:paddle.tensor.math.add||api:paddle.tensor.manipulation.concat
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [22, 1056, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_1,  # (shape: [22, 1024, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_2,  # (shape: [22, 288, 14, 14], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_3, var_4 = paddle.tensor.manipulation.split(
+            var_0, num_or_sections=[1024, 32], axis=1
+        )
+        var_5 = paddle.tensor.math.add(x=var_1, y=var_3)
+        var_6 = paddle.tensor.manipulation.concat([var_2, var_4], axis=1)
+        return var_5, var_6
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[22, 1056, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[22, 1024, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[22, 288, 14, 14], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
new file mode 100644
index 0000000000000..8c2d3a7d555ce
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^ResNeSt^ResNeSt50
+# api:paddle.tensor.manipulation.reshape||api:paddle.tensor.linalg.transpose||api:paddle.nn.functional.activation.softmax||api:paddle.tensor.manipulation.reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [22, 1024, 1, 1], dtype: paddle.float32, stop_gradient: True)
+    ):
+        var_1 = paddle.tensor.manipulation.reshape(
+            x=var_0, shape=[22, 1, 2, 512]
+        )
+        var_2 = paddle.tensor.linalg.transpose(x=var_1, perm=[0, 2, 1, 3])
+        var_3 = paddle.nn.functional.activation.softmax(var_2, axis=1)
+        var_4 = paddle.tensor.manipulation.reshape(
+            x=var_3, shape=[22, 1024, 1, 1]
+        )
+        return var_4
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[22, 1024, 1, 1], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
new file mode 100644
index 0000000000000..5f46a63fc0346
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^RedNet^RedNet38
+# api:paddle.nn.functional.conv._conv_nd||method:reshape||method:unsqueeze||api:paddle.nn.functional.common.unfold||method:reshape||method:__mul__||method:sum||method:reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[784],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[784, 64, 1, 1],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [10, 64, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_1,  # (shape: [10, 256, 14, 14], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_2 = paddle.nn.functional.conv._conv_nd(
+            var_0,
+            self.parameter_1,
+            bias=self.parameter_0,
+            stride=[1, 1],
+            padding=[0, 0],
+            padding_algorithm='EXPLICIT',
+            dilation=[1, 1],
+            groups=1,
+            data_format='NCHW',
+            channel_dim=1,
+            op_type='conv2d',
+            use_cudnn=True,
+        )
+        var_3 = var_2.reshape((10, 16, 49, 14, 14))
+        var_4 = var_3.unsqueeze(2)
+        var_5 = paddle.nn.functional.common.unfold(var_1, 7, 1, 3, 1)
+        var_6 = var_5.reshape((10, 16, 16, 49, 14, 14))
+        var_7 = var_4.__mul__(var_6)
+        var_8 = var_7.sum(axis=3)
+        var_9 = var_8.reshape((10, 256, 14, 14))
+        return var_9
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[10, 64, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[10, 256, 14, 14], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_34.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_34.py
new file mode 100644
index 0000000000000..c4ce15aed780a
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_34.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^HRNet^HRNet_W32_C
+# api:paddle.nn.functional.common.upsample||api:paddle.tensor.math.add
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [10, 32, 14, 14], dtype: paddle.float32, stop_gradient: False)
+        var_1,  # (shape: [10, 32, 56, 56], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_2 = paddle.nn.functional.common.upsample(
+            var_0, scale_factor=4, mode='nearest'
+        )
+        var_3 = paddle.tensor.math.add(x=var_1, y=var_2)
+        return var_2, var_3
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[10, 32, 14, 14], dtype=paddle.float32),
+            paddle.rand(shape=[10, 32, 56, 56], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=True
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
new file mode 100644
index 0000000000000..f2f2d121053f7
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.nn.functional.conv._conv_nd||method:flatten||method:transpose||api:paddle.nn.functional.norm.layer_norm
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+        self.parameter_2 = self.create_parameter(
+            shape=[96, 3, 7, 7],
+            dtype=paddle.float32,
+        )
+        self.parameter_3 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [4, 3, 384, 384], dtype: paddle.float32, stop_gradient: True)
+    ):
+        var_1 = paddle.nn.functional.conv._conv_nd(
+            var_0,
+            self.parameter_2,
+            bias=self.parameter_3,
+            stride=[4, 4],
+            padding=[2, 2],
+            padding_algorithm='EXPLICIT',
+            dilation=[1, 1],
+            groups=1,
+            data_format='NCHW',
+            channel_dim=1,
+            op_type='conv2d',
+            use_cudnn=True,
+        )
+        var_2 = var_1.flatten(start_axis=2, stop_axis=-1)
+        var_3 = var_2.transpose([0, 2, 1])
+        var_4 = paddle.nn.functional.norm.layer_norm(
+            var_3,
+            normalized_shape=[96],
+            weight=self.parameter_1,
+            bias=self.parameter_0,
+            epsilon=1e-05,
+        )
+        return var_4
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[4, 3, 384, 384], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=False, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
new file mode 100644
index 0000000000000..91daaef23f34f
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.nn.functional.norm.layer_norm||api:paddle.nn.functional.common.linear||method:chunk
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[288],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[96, 288],
+            dtype=paddle.float32,
+        )
+        self.parameter_2 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+        self.parameter_3 = self.create_parameter(
+            shape=[96],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [6, 9216, 96], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = paddle.nn.functional.norm.layer_norm(
+            var_0,
+            normalized_shape=[96],
+            weight=self.parameter_2,
+            bias=self.parameter_3,
+            epsilon=1e-05,
+        )
+        var_2 = paddle.nn.functional.common.linear(
+            x=var_1, weight=self.parameter_1, bias=self.parameter_0, name=None
+        )
+        var_3, var_4, var_5 = var_2.chunk(3, axis=-1)
+        return var_3, var_4, var_5
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (paddle.rand(shape=[6, 9216, 96], dtype=paddle.float32),)
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=False, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
new file mode 100644
index 0000000000000..153803e79c821
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.nn.functional.norm.layer_norm||api:paddle.tensor.stat.mean
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.parameter_0 = self.create_parameter(
+            shape=[768],
+            dtype=paddle.float32,
+        )
+        self.parameter_1 = self.create_parameter(
+            shape=[768],
+            dtype=paddle.float32,
+        )
+
+    def forward(
+        self,
+        var_0,  # (shape: [4, 144, 768], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = paddle.nn.functional.norm.layer_norm(
+            var_0,
+            normalized_shape=[768],
+            weight=self.parameter_0,
+            bias=self.parameter_1,
+            epsilon=1e-05,
+        )
+        var_2 = paddle.tensor.stat.mean(var_1, axis=1)
+        return var_2
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (paddle.rand(shape=[4, 144, 768], dtype=paddle.float32),)
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=False, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_38.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_38.py
new file mode 100644
index 0000000000000..c9b53869aa8cb
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_38.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# method:reshape||method:transpose||method:reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [4, 48, 96, 96], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = var_0.reshape([4, 48, 1, 96, 96, 1])
+        var_2 = var_1.transpose([0, 2, 4, 3, 5, 1])
+        var_3 = var_2.reshape([-1, 96, 48])
+        return var_3
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[4, 48, 96, 96], dtype=paddle.float32),
+        )
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=True
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
new file mode 100644
index 0000000000000..7f986ffce5678
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# api:paddle.tensor.creation.to_tensor||method:reshape||method:transpose||method:reshape
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class LayerCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [12, 288, 192], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1 = paddle.tensor.creation.to_tensor(6, 'int32')
+        var_2 = var_0.reshape([var_1, 2, 1, 12, 24, 192])
+        var_3 = var_2.transpose([0, 1, 3, 2, 4, 5])
+        var_4 = var_3.reshape([var_1, 24, 24, 192])
+        return var_4
+
+
+class TestLayer(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (paddle.rand(shape=[12, 288, 192], dtype=paddle.float32),)
+        self.net = LayerCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py
new file mode 100644
index 0000000000000..b3a5f8967bfb9
--- /dev/null
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# repo: PaddleClas
+# model: ppcls^configs^ImageNet^CSWinTransformer^CSWinTransformer_base_384
+# method:chunk
+import unittest
+
+import numpy as np
+
+import paddle
+
+
+class ChunkCase(paddle.nn.Layer):
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        var_0,  # (shape: [10, 2304, 192], dtype: paddle.float32, stop_gradient: False)
+    ):
+        var_1, var_2 = var_0.chunk(2, axis=-1)
+        return var_1, var_2
+
+
+class TestChunk(unittest.TestCase):
+    def setUp(self):
+        self.inputs = (
+            paddle.rand(shape=[10, 2304, 192], dtype=paddle.float32),
+        )
+        self.net = ChunkCase()
+
+    def train(self, net, to_static, with_prim=False, with_cinn=False):
+        if to_static:
+            paddle.set_flags({'FLAGS_prim_all': with_prim})
+            if with_cinn:
+                build_strategy = paddle.static.BuildStrategy()
+                build_strategy.build_cinn_pass = True
+                net = paddle.jit.to_static(
+                    net, build_strategy=build_strategy, full_graph=True
+                )
+            else:
+                net = paddle.jit.to_static(net, full_graph=True)
+        paddle.seed(123)
+        outs = net(*self.inputs)
+        return outs
+
+    # NOTE prim + cinn lead to error
+    # NOTE prim + cinn lead to error
+    def test_ast_prim_cinn(self):
+        st_out = self.train(self.net, to_static=True)
+        cinn_out = self.train(
+            self.net, to_static=True, with_prim=True, with_cinn=False
+        )
+        for st, cinn in zip(
+            paddle.utils.flatten(st_out), paddle.utils.flatten(cinn_out)
+        ):
+            np.testing.assert_allclose(st.numpy(), cinn.numpy(), atol=1e-8)
+
+
+if __name__ == '__main__':
+    unittest.main()

From a6e93f581b02496300cf49265f18f552616a3805 Mon Sep 17 00:00:00 2001
From: feifei-111 <2364819892@qq.com>
Date: Mon, 29 Jan 2024 07:11:42 +0000
Subject: [PATCH 2/3] update

---
 test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py    | 1 -
 test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py    | 1 -
 test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py    | 1 -
 test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py    | 1 -
 test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py    | 1 -
 test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py    | 1 -
 test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py    | 1 -
 test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py | 1 -
 8 files changed, 8 deletions(-)

diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
index 095b001dd76b8..c335a9df27a8a 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_31.py
@@ -64,7 +64,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
index 8c2d3a7d555ce..5dc014a873d05 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_32.py
@@ -63,7 +63,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
index 5f46a63fc0346..d835639aea4ff 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
@@ -86,7 +86,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
index f2f2d121053f7..78311b8c6a05e 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_35.py
@@ -94,7 +94,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
index 91daaef23f34f..10e7eacac4c14 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_36.py
@@ -80,7 +80,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
index 153803e79c821..6b6858d673bb6 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_37.py
@@ -69,7 +69,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
index 7f986ffce5678..c2cfa2786670d 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_39.py
@@ -57,7 +57,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py
index b3a5f8967bfb9..6b9d3e3c94557 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_chunk.py
@@ -56,7 +56,6 @@ def train(self, net, to_static, with_prim=False, with_cinn=False):
         outs = net(*self.inputs)
         return outs
 
-    # NOTE prim + cinn lead to error
     # NOTE prim + cinn lead to error
     def test_ast_prim_cinn(self):
         st_out = self.train(self.net, to_static=True)

From 893928624cbffd166c64de1e7ec310bf47a9b12f Mon Sep 17 00:00:00 2001
From: feifei-111 <2364819892@qq.com>
Date: Tue, 30 Jan 2024 11:06:41 +0000
Subject: [PATCH 3/3] update

---
 test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py  | 4 ++--
 test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py | 2 +-
 test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py  | 4 ++--
 test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py  | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py
index 23ad1a821c43c..3ace9a72f769b 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_3.py
@@ -48,8 +48,8 @@ def forward(
         var_9 = var_8.reshape((0, 16, 49))
         var_10 = paddle.tensor.linalg.transpose(var_1, perm=[0, 1, 3, 2])
         var_11 = paddle.tensor.linalg.matmul(var_5, var_10)
-        var_12 = var_11.__mul__(0.25)
-        var_13 = var_12.__add__(var_9)
+        var_12 = var_11 * 0.25
+        var_13 = var_12 + var_9
         var_14 = paddle.nn.functional.activation.softmax(var_13)
         var_15 = paddle.tensor.linalg.matmul(var_14, var_2)
         var_16 = paddle.tensor.linalg.transpose(var_15, perm=[0, 2, 1, 3])
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
index d835639aea4ff..95b55b9bfe331 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_33.py
@@ -57,7 +57,7 @@ def forward(
         var_4 = var_3.unsqueeze(2)
         var_5 = paddle.nn.functional.common.unfold(var_1, 7, 1, 3, 1)
         var_6 = var_5.reshape((10, 16, 16, 49, 14, 14))
-        var_7 = var_4.__mul__(var_6)
+        var_7 = var_4 * var_6
         var_8 = var_7.sum(axis=3)
         var_9 = var_8.reshape((10, 256, 14, 14))
         return var_9
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py
index 8341736299761..da5d0491b84b6 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_7.py
@@ -51,8 +51,8 @@ def forward(
         var_12 = paddle.tensor.linalg.transpose(var_11, (1, 0))
         var_13 = var_12.reshape((0, 49, 49))
         var_14 = paddle.tensor.linalg.matmul(var_6, var_9)
-        var_15 = var_14.__mul__(0.25)
-        var_16 = var_15.__add__(var_13)
+        var_15 = var_14 * 0.25
+        var_16 = var_15 + var_13
         var_17 = paddle.nn.functional.activation.softmax(var_16)
         var_18 = paddle.tensor.linalg.matmul(var_17, var_8)
         var_19 = paddle.tensor.linalg.transpose(var_18, perm=[0, 2, 1, 3])
diff --git a/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py b/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py
index 073175ced160b..abd5282e9d1b7 100644
--- a/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py
+++ b/test/ir/pir/cinn/sub_graphs/test_sub_graph_9.py
@@ -48,8 +48,8 @@ def forward(
         var_9 = var_8.reshape((0, 49, 196))
         var_10 = paddle.tensor.linalg.transpose(var_1, perm=[0, 1, 3, 2])
         var_11 = paddle.tensor.linalg.matmul(var_5, var_10)
-        var_12 = var_11.__mul__(0.25)
-        var_13 = var_12.__add__(var_9)
+        var_12 = var_11 * 0.25
+        var_13 = var_12 + var_9
         var_14 = paddle.nn.functional.activation.softmax(var_13)
         var_15 = paddle.tensor.linalg.matmul(var_14, var_2)
         var_16 = paddle.tensor.linalg.transpose(var_15, perm=[0, 2, 1, 3])