From 28ed5355f8a2d9504a4a9f7ad9238050f8eca1fc Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 19 Sep 2024 11:49:52 +0800 Subject: [PATCH 01/53] fix --- paconvert/api_mapping.json | 175 +++++++++++++++++++ tests/test_signal_windows_blackman.py | 77 ++++++++ tests/test_signal_windows_cosine.py | 77 ++++++++ tests/test_signal_windows_exponential.py | 85 +++++++++ tests/test_signal_windows_gaussian.py | 85 +++++++++ tests/test_signal_windows_general_cosine.py | 85 +++++++++ tests/test_signal_windows_general_hamming.py | 85 +++++++++ tests/test_signal_windows_hamming | 76 ++++++++ tests/test_signal_windows_hann.py | 76 ++++++++ 9 files changed, 821 insertions(+) create mode 100644 tests/test_signal_windows_blackman.py create mode 100644 tests/test_signal_windows_cosine.py create mode 100644 tests/test_signal_windows_exponential.py create mode 100644 tests/test_signal_windows_gaussian.py create mode 100644 tests/test_signal_windows_general_cosine.py create mode 100644 tests/test_signal_windows_general_hamming.py create mode 100644 tests/test_signal_windows_hamming create mode 100644 tests/test_signal_windows_hann.py diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 10232f952..8a621a203 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14560,6 +14560,181 @@ "input": "x" } }, + "torch.signal.windows.blackman":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._blackman", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + "kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "dtype": "paddle.float64" + } + }, + "torch.signal.windows.cosine":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._cosine", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + "kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "dtype": "paddle.float64" + } + }, + "torch.signal.windows.exponential":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._exponential", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "center", + "tau", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + "kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "dtype": "paddle.float64" + } + }, + "torch.signal.windows.gaussian":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._gaussian", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "std", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + "kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "std": 1.0, + "dtype": "paddle.float64" + } + }, + "torch.signal.windows.general_cosine":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._general_cosine", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "a", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + "kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "dtype": "paddle.float64" + } + }, + "torch.signal.windows.general_hamming":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._general_hamming", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "alpha", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + 
"kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "alpha": 0.54, + "dtype": "paddle.float64" + } + }, + "torch.signal.windows.hamming":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._hamming", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + "kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "dtype": "paddle.float64" + } + }, + "torch.signal.windows.hann":{ + "Matcher": "GenericMatcher", + "paddle_api": "paddle.audio.functional._hann", + "min_input_args": 1, + "args_list": [ + "M", + "*", + "sym", + "dtype", + "layout", + "device", + "requires_grad" + ], + "kwargs_change": { + "dtype": "dtype", + "device": "place" + }, + "paddle_default_kwargs": { + "dtype": "paddle.float64" + } + }, "torch.sign": { "Matcher": "GenericMatcher", "paddle_api": "paddle.sign", diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py new file mode 100644 index 000000000..7fd239006 --- /dev/null +++ b/tests/test_signal_windows_blackman.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.blackman") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) \ No newline at end of file diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py new file mode 100644 index 000000000..e520d6fc0 --- /dev/null +++ b/tests/test_signal_windows_cosine.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.cosine") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(10) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(10, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(10, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) \ No newline at end of file diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py new file mode 100644 index 000000000..3285d1e06 --- /dev/null +++ b/tests/test_signal_windows_exponential.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.exponential") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=.5, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) \ No newline at end of file diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py new file mode 100644 index 000000000..5a3c330e4 --- /dev/null +++ b/tests/test_signal_windows_gaussian.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.gaussian") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.9, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py new file mode 100644 index 000000000..e579ae5a0 --- /dev/null +++ b/tests/test_signal_windows_general_cosine.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.general_cosine") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31]) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) \ No newline at end of file diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py new file mode 100644 index 000000000..8bcfb83a6 --- /dev/null +++ b/tests/test_signal_windows_general_hamming.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.general_hamming") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.5, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.5, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) \ No newline at end of file diff --git a/tests/test_signal_windows_hamming b/tests/test_signal_windows_hamming new file mode 100644 index 000000000..d446915a7 --- /dev/null +++ b/tests/test_signal_windows_hamming @@ -0,0 +1,76 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.hamming") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) \ No newline at end of file diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py new file mode 100644 index 000000000..5556d30da --- /dev/null +++ b/tests/test_signal_windows_hann.py @@ -0,0 +1,76 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import textwrap + +from apibase import APIBase + +obj = APIBase("torch.signal.windows.hann") + + +def test_case_1(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_2(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_3(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_4(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["out"]) + + +def test_case_5(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"]) \ No newline at end of file From cdc060b7f8d9b318fe02c931d972847a19adf52e Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 19 Sep 2024 11:59:58 +0800 Subject: [PATCH 02/53] code style --- paconvert/api_mapping.json | 42 +++++++++---------- tests/test_signal_windows_blackman.py | 4 +- tests/test_signal_windows_cosine.py | 4 +- tests/test_signal_windows_exponential.py | 6 ++- tests/test_signal_windows_gaussian.py | 4 ++ tests/test_signal_windows_general_cosine.py | 6 ++- tests/test_signal_windows_general_hamming.py | 6 ++- ...hamming => test_signal_windows_hamming.py} | 5 ++- tests/test_signal_windows_hann.py | 5 ++- 9 files changed, 54 insertions(+), 28 deletions(-) rename tests/{test_signal_windows_hamming => test_signal_windows_hamming.py} (98%) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 8a621a203..b3c5b4194 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14560,7 +14560,20 @@ "input": "x" } }, - "torch.signal.windows.blackman":{ + "torch.sign": { + "Matcher": "GenericMatcher", + "paddle_api": "paddle.sign", + "min_input_args": 1, + "args_list": [ + "input", + "*", + "out" + ], + "kwargs_change": { + "input": "x" + } + }, + "torch.signal.windows.blackman": { "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional._blackman", "min_input_args": 1, @@ -14581,7 +14594,7 @@ "dtype": "paddle.float64" } }, - "torch.signal.windows.cosine":{ + "torch.signal.windows.cosine": { "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional._cosine", "min_input_args": 1, @@ -14602,7 +14615,7 @@ "dtype": "paddle.float64" } }, - "torch.signal.windows.exponential":{ + "torch.signal.windows.exponential": { "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional._exponential", "min_input_args": 1, @@ -14625,7 +14638,7 @@ "dtype": "paddle.float64" } }, - "torch.signal.windows.gaussian":{ + "torch.signal.windows.gaussian": { "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional._gaussian", "min_input_args": 1, @@ -14648,7 +14661,7 @@ "dtype": "paddle.float64" } }, - "torch.signal.windows.general_cosine":{ + "torch.signal.windows.general_cosine": { "Matcher": "GenericMatcher", "paddle_api": 
"paddle.audio.functional._general_cosine", "min_input_args": 1, @@ -14670,7 +14683,7 @@ "dtype": "paddle.float64" } }, - "torch.signal.windows.general_hamming":{ + "torch.signal.windows.general_hamming": { "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional._general_hamming", "min_input_args": 1, @@ -14693,7 +14706,7 @@ "dtype": "paddle.float64" } }, - "torch.signal.windows.hamming":{ + "torch.signal.windows.hamming": { "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional._hamming", "min_input_args": 1, @@ -14714,7 +14727,7 @@ "dtype": "paddle.float64" } }, - "torch.signal.windows.hann":{ + "torch.signal.windows.hann": { "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional._hann", "min_input_args": 1, @@ -14735,19 +14748,6 @@ "dtype": "paddle.float64" } }, - "torch.sign": { - "Matcher": "GenericMatcher", - "paddle_api": "paddle.sign", - "min_input_args": 1, - "args_list": [ - "input", - "*", - "out" - ], - "kwargs_change": { - "input": "x" - } - }, "torch.signbit": { "Matcher": "GenericMatcher", "paddle_api": "paddle.signbit", diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index 7fd239006..3f42176d3 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -58,6 +58,7 @@ def test_case_4(): ) obj.run(pytorch_code, ["result"]) + def test_case_5(): pytorch_code = textwrap.dedent( """ @@ -67,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_7(): pytorch_code = textwrap.dedent( """ @@ -74,4 +76,4 @@ def test_case_7(): result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) \ No newline at end of file + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py index e520d6fc0..f9470c4b2 100644 --- a/tests/test_signal_windows_cosine.py +++ b/tests/test_signal_windows_cosine.py @@ -58,6 +58,7 @@ def test_case_4(): ) obj.run(pytorch_code, ["result"]) + def test_case_5(): pytorch_code = textwrap.dedent( """ @@ -67,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_7(): pytorch_code = textwrap.dedent( """ @@ -74,4 +76,4 @@ def test_case_7(): result = torch.signal.windows.cosine(10, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) \ No newline at end of file + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 3285d1e06..d14a7e3ec 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -38,6 +38,7 @@ def test_case_2(): ) obj.run(pytorch_code, ["result"]) + def test_case_3(): pytorch_code = textwrap.dedent( """ @@ -47,6 +48,7 @@ def test_case_3(): ) obj.run(pytorch_code, ["result"]) + def test_case_4(): pytorch_code = textwrap.dedent( """ @@ -66,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_6(): pytorch_code = textwrap.dedent( """ @@ -75,6 +78,7 @@ def test_case_6(): ) obj.run(pytorch_code, ["result"]) + def test_case_7(): pytorch_code = textwrap.dedent( """ @@ -82,4 +86,4 @@ def test_case_7(): result = torch.signal.windows.exponential(10, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) \ No newline at end of file + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index 5a3c330e4..79582c77b 100644 --- 
a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -38,6 +38,7 @@ def test_case_2(): ) obj.run(pytorch_code, ["result"]) + def test_case_3(): pytorch_code = textwrap.dedent( """ @@ -47,6 +48,7 @@ def test_case_3(): ) obj.run(pytorch_code, ["result"]) + def test_case_4(): pytorch_code = textwrap.dedent( """ @@ -66,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_6(): pytorch_code = textwrap.dedent( """ @@ -75,6 +78,7 @@ def test_case_6(): ) obj.run(pytorch_code, ["result"]) + def test_case_7(): pytorch_code = textwrap.dedent( """ diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index e579ae5a0..2351251f0 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -38,6 +38,7 @@ def test_case_2(): ) obj.run(pytorch_code, ["result"]) + def test_case_3(): pytorch_code = textwrap.dedent( """ @@ -47,6 +48,7 @@ def test_case_3(): ) obj.run(pytorch_code, ["result"]) + def test_case_4(): pytorch_code = textwrap.dedent( """ @@ -66,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_6(): pytorch_code = textwrap.dedent( """ @@ -75,6 +78,7 @@ def test_case_6(): ) obj.run(pytorch_code, ["result"]) + def test_case_7(): pytorch_code = textwrap.dedent( """ @@ -82,4 +86,4 @@ def test_case_7(): result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) \ No newline at end of file + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index 8bcfb83a6..6f897bbb3 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -38,6 +38,7 @@ def test_case_2(): ) obj.run(pytorch_code, ["result"]) + def test_case_3(): pytorch_code = textwrap.dedent( """ @@ -47,6 +48,7 @@ def test_case_3(): ) obj.run(pytorch_code, ["result"]) + def test_case_4(): pytorch_code = textwrap.dedent( """ @@ -66,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_6(): pytorch_code = textwrap.dedent( """ @@ -75,6 +78,7 @@ def test_case_6(): ) obj.run(pytorch_code, ["result"]) + def test_case_7(): pytorch_code = textwrap.dedent( """ @@ -82,4 +86,4 @@ def test_case_7(): result = torch.signal.windows.general_hamming(10, alpha=0.5, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) \ No newline at end of file + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hamming b/tests/test_signal_windows_hamming.py similarity index 98% rename from tests/test_signal_windows_hamming rename to tests/test_signal_windows_hamming.py index d446915a7..64e9a5989 100644 --- a/tests/test_signal_windows_hamming +++ b/tests/test_signal_windows_hamming.py @@ -38,6 +38,7 @@ def test_case_2(): ) obj.run(pytorch_code, ["result"]) + def test_case_3(): pytorch_code = textwrap.dedent( """ @@ -47,6 +48,7 @@ def test_case_3(): ) obj.run(pytorch_code, ["result"]) + def test_case_4(): pytorch_code = textwrap.dedent( """ @@ -66,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_6(): pytorch_code = textwrap.dedent( """ @@ -73,4 +76,4 @@ def test_case_6(): result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) \ No newline at end of file + 
obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index 5556d30da..a783ea2d8 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -38,6 +38,7 @@ def test_case_2(): ) obj.run(pytorch_code, ["result"]) + def test_case_3(): pytorch_code = textwrap.dedent( """ @@ -47,6 +48,7 @@ def test_case_3(): ) obj.run(pytorch_code, ["result"]) + def test_case_4(): pytorch_code = textwrap.dedent( """ @@ -66,6 +68,7 @@ def test_case_5(): ) obj.run(pytorch_code, ["result"]) + def test_case_6(): pytorch_code = textwrap.dedent( """ @@ -73,4 +76,4 @@ def test_case_6(): result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) \ No newline at end of file + obj.run(pytorch_code, ["result"]) From 131dac1f1e91431fd6a4b8e6e64db8079e55f00b Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 19 Sep 2024 16:46:45 +0800 Subject: [PATCH 03/53] fix --- tests/test_signal_windows_blackman.py | 2 +- tests/test_signal_windows_cosine.py | 2 +- tests/test_signal_windows_exponential.py | 2 +- tests/test_signal_windows_gaussian.py | 2 +- tests/test_signal_windows_general_cosine.py | 2 +- tests/test_signal_windows_general_hamming.py | 2 +- tests/test_signal_windows_hamming.py | 2 +- tests/test_signal_windows_hann.py | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index 3f42176d3..f7139c435 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.blackman(5, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_4(): diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py index f9470c4b2..5ef817591 100644 --- a/tests/test_signal_windows_cosine.py +++ b/tests/test_signal_windows_cosine.py @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.cosine(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_4(): diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index d14a7e3ec..63fbccc94 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.exponential(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_5(): diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index 79582c77b..de8b7dd6a 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_5(): diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 2351251f0..5fa44e3c3 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], 
dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_5(): diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index 6f897bbb3..1adeb8925 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.general_hamming(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_5(): diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index 64e9a5989..be2218e2e 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.hamming(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_5(): diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index a783ea2d8..45ec5cb5f 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.hann(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["out"]) + obj.run(pytorch_code, ["result"]) def test_case_5(): From fdd7ae6b792d8aa3538e4a25199dfc0215c189e1 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 19 Sep 2024 17:19:39 +0800 Subject: [PATCH 04/53] fix --- paconvert/api_mapping.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index b3c5b4194..4ec6df19a 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14575,7 +14575,7 @@ }, "torch.signal.windows.blackman": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._blackman", + "paddle_api": "paddle.audio.functional.window._blackman", "min_input_args": 1, "args_list": [ "M", @@ -14596,7 +14596,7 @@ }, "torch.signal.windows.cosine": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._cosine", + "paddle_api": "paddle.audio.functional.window._cosine", "min_input_args": 1, "args_list": [ "M", @@ -14617,7 +14617,7 @@ }, "torch.signal.windows.exponential": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._exponential", + "paddle_api": "paddle.audio.functional.window._exponential", "min_input_args": 1, "args_list": [ "M", @@ -14640,7 +14640,7 @@ }, "torch.signal.windows.gaussian": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._gaussian", + "paddle_api": "paddle.audio.functional.window._gaussian", "min_input_args": 1, "args_list": [ "M", @@ -14663,7 +14663,7 @@ }, "torch.signal.windows.general_cosine": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._general_cosine", + "paddle_api": "paddle.audio.functional.window._general_cosine", "min_input_args": 1, "args_list": [ "M", @@ -14685,7 +14685,7 @@ }, "torch.signal.windows.general_hamming": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._general_hamming", + "paddle_api": "paddle.audio.functional.window._general_hamming", "min_input_args": 1, "args_list": [ "M", @@ -14708,7 +14708,7 @@ }, "torch.signal.windows.hamming": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._hamming", + "paddle_api": 
"paddle.audio.functional.window._hamming", "min_input_args": 1, "args_list": [ "M", @@ -14729,7 +14729,7 @@ }, "torch.signal.windows.hann": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional._hann", + "paddle_api": "paddle.audio.functional.window._hann", "min_input_args": 1, "args_list": [ "M", From 9eeb83d58950a10f21b2ee758660c1ad803259a2 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 20 Sep 2024 11:15:07 +0800 Subject: [PATCH 05/53] fix --- .vscode/settings.json | 5 +++++ tests/test_signal_windows_blackman.py | 14 +++++++------- tests/test_signal_windows_cosine.py | 12 ++++++------ tests/test_signal_windows_exponential.py | 14 +++++++------- tests/test_signal_windows_gaussian.py | 14 +++++++------- tests/test_signal_windows_general_cosine.py | 2 +- tests/test_signal_windows_general_hamming.py | 14 +++++++------- tests/test_signal_windows_hamming.py | 12 ++++++------ tests/test_signal_windows_hann.py | 12 ++++++------ 9 files changed, 52 insertions(+), 47 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..741ce489a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "cSpell.words": [ + "blackman" + ] +} diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index f7139c435..616f1f0d0 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.blackman(5) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.blackman(5, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_3(): @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.blackman(5, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,14 +66,14 @@ def test_case_5(): result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) -def test_case_7(): +def test_case_6(): pytorch_code = textwrap.dedent( """ import torch result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py index 5ef817591..4479daeb2 100644 --- a/tests/test_signal_windows_cosine.py +++ b/tests/test_signal_windows_cosine.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.cosine(10) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.cosine(10, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def 
test_case_3(): @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.cosine(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.cosine(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,7 +66,7 @@ def test_case_5(): result = torch.signal.windows.cosine(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_7(): @@ -76,4 +76,4 @@ def test_case_7(): result = torch.signal.windows.cosine(10, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 63fbccc94..6df175233 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.exponential(10) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.exponential(10, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_3(): @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.exponential(10, tau=.5, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.exponential(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,7 +66,7 @@ def test_case_5(): result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): @@ -76,7 +76,7 @@ def test_case_6(): result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_7(): @@ -86,4 +86,4 @@ def test_case_7(): result = torch.signal.windows.exponential(10, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index de8b7dd6a..c5ce421c1 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.gaussian(10) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.gaussian(10, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_3(): @@ -46,7 +46,7 
@@ def test_case_3(): result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,7 +66,7 @@ def test_case_5(): result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): @@ -76,7 +76,7 @@ def test_case_6(): result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_7(): @@ -86,4 +86,4 @@ def test_case_7(): result = torch.signal.windows.gaussian(10, std=0.9, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 5fa44e3c3..ba42fbb21 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31]) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index 1adeb8925..88d5eeeaf 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.general_hamming(10) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.general_hamming(10, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_3(): @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.general_hamming(10, alpha=0.5, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.general_hamming(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,7 +66,7 @@ def test_case_5(): result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): @@ -76,7 +76,7 @@ def test_case_6(): result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_7(): @@ -86,4 +86,4 @@ def test_case_7(): result = torch.signal.windows.general_hamming(10, alpha=0.5, sym=False, 
dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index be2218e2e..d3ff10417 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.hamming(10) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.hamming(10, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_3(): @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.hamming(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,7 +66,7 @@ def test_case_5(): result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): @@ -76,4 +76,4 @@ def test_case_6(): result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index 45ec5cb5f..e803b5bc6 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.hann(10) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.hann(10, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_3(): @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.hann(10, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,7 +66,7 @@ def test_case_5(): result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): @@ -76,4 +76,4 @@ def test_case_6(): result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) From 73e092945ba14348f9b7c0f9dc9bc4ae909e48ff Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 24 Sep 2024 09:00:38 +0800 Subject: [PATCH 06/53] fix --- .vscode/settings.json | 5 ----- paconvert/api_mapping.json 
| 24 ++++++++---------------- 2 files changed, 8 insertions(+), 21 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 741ce489a..000000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "cSpell.words": [ - "blackman" - ] -} diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 4ec6df19a..057ef23db 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14587,8 +14587,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "dtype": "paddle.float64" @@ -14608,8 +14607,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "dtype": "paddle.float64" @@ -14631,8 +14629,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "dtype": "paddle.float64" @@ -14653,8 +14650,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "std": 1.0, @@ -14676,8 +14672,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "dtype": "paddle.float64" @@ -14698,8 +14693,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "alpha": 0.54, @@ -14720,8 +14714,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "dtype": "paddle.float64" @@ -14741,8 +14734,7 @@ "requires_grad" ], "kwargs_change": { - "dtype": "dtype", - "device": "place" + "dtype": "dtype" }, "paddle_default_kwargs": { "dtype": "paddle.float64" From 9e9cab5c0a15b847938cd25d7132e9c304dad264 Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 24 Sep 2024 14:30:41 +0800 Subject: [PATCH 07/53] CI From ce46bff97d19b0a09426be8261045bf117c91486 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 25 Sep 2024 16:27:07 +0800 Subject: [PATCH 08/53] fix --- paconvert/api_mapping.json | 70 ++++++++++++++++++++++++++------------ paconvert/api_matcher.py | 27 +++++++++++++++ 2 files changed, 75 insertions(+), 22 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 7c6e05030..a8ac1d40f 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14573,7 +14573,7 @@ }, "torch.signal.windows.blackman": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional.window._blackman", + "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ "M", @@ -14585,15 +14585,18 @@ "requires_grad" ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "blackman" } }, "torch.signal.windows.cosine": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional.window._cosine", + "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ "M", @@ -14605,15 +14608,18 @@ "requires_grad" ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "cosine" } }, "torch.signal.windows.exponential": { "Matcher": "GenericMatcher", - "paddle_api": 
"paddle.audio.functional.window._exponential", + "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ "M", @@ -14626,16 +14632,23 @@ "device", "requires_grad" ], + "unsupport_args": [ + "center", + "tau" + ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "exponential" } }, "torch.signal.windows.gaussian": { - "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional.window._gaussian", + "Matcher": "GaussianMatcher", + "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ "M", @@ -14648,17 +14661,19 @@ "requires_grad" ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "std": 1.0, - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "gaussian" } }, "torch.signal.windows.general_cosine": { - "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional.window._general_cosine", - "min_input_args": 1, + "Matcher": "GaussianMatcher", + "paddle_api": "paddle.audio.functional.get_window", + "min_input_args": 2, "args_list": [ "M", "*", @@ -14670,15 +14685,18 @@ "requires_grad" ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "general_cosine" } }, "torch.signal.windows.general_hamming": { - "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional.window._general_hamming", + "Matcher": "GaussianMatcher", + "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ "M", @@ -14691,16 +14709,18 @@ "requires_grad" ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "alpha": 0.54, - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "general_hamming" } }, "torch.signal.windows.hamming": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional.window._hamming", + "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ "M", @@ -14712,15 +14732,18 @@ "requires_grad" ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "hamming" } }, "torch.signal.windows.hann": { "Matcher": "GenericMatcher", - "paddle_api": "paddle.audio.functional.window._hann", + "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ "M", @@ -14732,10 +14755,13 @@ "requires_grad" ], "kwargs_change": { + "M": "win_length", + "sym": "fftbins", "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "paddle.float64", + "window": "hann" } }, "torch.signbit": { diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index fa0dfb2be..25980840e 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -519,6 +519,33 @@ def generate_code(self, kwargs): return super().generate_code(kwargs) +class GaussianMatcher(BaseMatcher): + def generate_code(self, kwargs): + kwargs_window = self.set_paddle_default_kwargs(kwargs) + kwargs_change = self.api_mapping.get("kwargs_change", {}) + for k in kwargs_change: + if k in kwargs: + kwargs[kwargs_change[k]] = kwargs.pop(k) + + if "std" in kwargs: + code = "paddle.audio.functional.get_window(({},{}),{})".format( + 
kwargs_window.pop("window"), kwargs.pop("std"), kwargs.pop("win_length") + ) + + if "a" in kwargs: + code = "paddle.audio.functional.get_window(({},{}),{})".format( + kwargs_window.pop("window"), kwargs.pop("a"), kwargs.pop("win_length") + ) + if "alpha" in kwargs: + code = "paddle.audio.functional.get_window(({},{}),{})".format( + kwargs_window.pop("window"), + kwargs.pop("alpha"), + kwargs.pop("win_length"), + ) + + return code + + class Num2TensorBinaryWithAlphaMatcher(BaseMatcher): def generate_code(self, kwargs): kwargs_change = self.api_mapping.get("kwargs_change", {}) From 70cfd551076230a99557de3a8284d90a819332b7 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 25 Sep 2024 16:40:45 +0800 Subject: [PATCH 09/53] fix --- paconvert/api_mapping.json | 16 ++++++++-------- paconvert/api_matcher.py | 7 +++++-- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index a8ac1d40f..5b1381b24 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14572,7 +14572,7 @@ } }, "torch.signal.windows.blackman": { - "Matcher": "GenericMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14595,7 +14595,7 @@ } }, "torch.signal.windows.cosine": { - "Matcher": "GenericMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14618,7 +14618,7 @@ } }, "torch.signal.windows.exponential": { - "Matcher": "GenericMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14647,7 +14647,7 @@ } }, "torch.signal.windows.gaussian": { - "Matcher": "GaussianMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14671,7 +14671,7 @@ } }, "torch.signal.windows.general_cosine": { - "Matcher": "GaussianMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 2, "args_list": [ @@ -14695,7 +14695,7 @@ } }, "torch.signal.windows.general_hamming": { - "Matcher": "GaussianMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14719,7 +14719,7 @@ } }, "torch.signal.windows.hamming": { - "Matcher": "GenericMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14742,7 +14742,7 @@ } }, "torch.signal.windows.hann": { - "Matcher": "GenericMatcher", + "Matcher": "WindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 25980840e..846b6612d 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -519,7 +519,7 @@ def generate_code(self, kwargs): return super().generate_code(kwargs) -class GaussianMatcher(BaseMatcher): +class WindowsMatcher(BaseMatcher): def generate_code(self, kwargs): kwargs_window = self.set_paddle_default_kwargs(kwargs) kwargs_change = self.api_mapping.get("kwargs_change", {}) @@ -542,7 +542,10 @@ def generate_code(self, kwargs): kwargs.pop("alpha"), kwargs.pop("win_length"), ) - + else: + code = "paddle.audio.functional.get_window(({}),{})".format( + kwargs_window.pop("window"), kwargs.pop("win_length") + ) return code From 206980a3407eff412163192e34dc20b65539a008 Mon Sep 17 00:00:00 2001 
From: enkilee Date: Wed, 25 Sep 2024 16:53:26 +0800 Subject: [PATCH 10/53] fix --- paconvert/api_matcher.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 846b6612d..94a0e0674 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -521,30 +521,40 @@ def generate_code(self, kwargs): class WindowsMatcher(BaseMatcher): def generate_code(self, kwargs): - kwargs_window = self.set_paddle_default_kwargs(kwargs) + default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) + for k in default_kwargs: + if k not in kwargs: + kwargs[k] = default_kwargs[k] + kwargs_change = self.api_mapping.get("kwargs_change", {}) for k in kwargs_change: if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) if "std" in kwargs: - code = "paddle.audio.functional.get_window(({},{}),{})".format( - kwargs_window.pop("window"), kwargs.pop("std"), kwargs.pop("win_length") + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("std"), + kwargs.pop("win_length"), ) - if "a" in kwargs: - code = "paddle.audio.functional.get_window(({},{}),{})".format( - kwargs_window.pop("window"), kwargs.pop("a"), kwargs.pop("win_length") + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("a"), + kwargs.pop("win_length"), ) if "alpha" in kwargs: - code = "paddle.audio.functional.get_window(({},{}),{})".format( - kwargs_window.pop("window"), + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), kwargs.pop("alpha"), kwargs.pop("win_length"), ) else: - code = "paddle.audio.functional.get_window(({}),{})".format( - kwargs_window.pop("window"), kwargs.pop("win_length") + code = "{}({},{})".format( + self.get_paddle_api(), kwargs.pop("window"), kwargs.pop("win_length") ) return code From 7c8b49e181ef6f389699d5ed46c5c7194ebedaba Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 25 Sep 2024 17:27:08 +0800 Subject: [PATCH 11/53] fix --- paconvert/api_matcher.py | 64 ++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 94a0e0674..441b1d964 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -520,43 +520,35 @@ def generate_code(self, kwargs): class WindowsMatcher(BaseMatcher): - def generate_code(self, kwargs): - default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) - for k in default_kwargs: - if k not in kwargs: - kwargs[k] = default_kwargs[k] - - kwargs_change = self.api_mapping.get("kwargs_change", {}) - for k in kwargs_change: - if k in kwargs: - kwargs[kwargs_change[k]] = kwargs.pop(k) + def generate_aux_code(self): + CODE_TEMPLATE = textwrap.dedent( + """ + def add(self, *args, **kwargs): + if 'M' in kwargs: + win_length = kwargs['M'] + elif 'win_length' in kwargs: + win_length = kwargs['win_length'] + else: + win_length = args[1] + + if 'std' in kwargs: + std = kwargs['std'] + return paddle.audio.functional.get_window((args[0],std),win_length) + elif 'a' in kwargs: + a = kwargs['a'] + return paddle.audio.functional.get_window((args[0],a),win_length) + elif 'alpha' in kwargs: + alpha = kwargs['alpha'] + return paddle.audio.functional.get_window((args[0],alpha),win_length) + else: + return paddle.audio.functional.get_window(args[0], win_length) + """ + ) + return CODE_TEMPLATE - if "std" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - 
kwargs.pop("window"), - kwargs.pop("std"), - kwargs.pop("win_length"), - ) - if "a" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("a"), - kwargs.pop("win_length"), - ) - if "alpha" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("alpha"), - kwargs.pop("win_length"), - ) - else: - code = "{}({},{})".format( - self.get_paddle_api(), kwargs.pop("window"), kwargs.pop("win_length") - ) - return code + def generate_code(self, kwargs): + self.write_aux_code() + return "unchange" class Num2TensorBinaryWithAlphaMatcher(BaseMatcher): From 6a892d0a2c68da792f44d9730f7a80bb6aaf88b0 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 25 Sep 2024 17:29:16 +0800 Subject: [PATCH 12/53] fix --- paconvert/api_matcher.py | 64 ++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 28 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 441b1d964..94a0e0674 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -520,35 +520,43 @@ def generate_code(self, kwargs): class WindowsMatcher(BaseMatcher): - def generate_aux_code(self): - CODE_TEMPLATE = textwrap.dedent( - """ - def add(self, *args, **kwargs): - if 'M' in kwargs: - win_length = kwargs['M'] - elif 'win_length' in kwargs: - win_length = kwargs['win_length'] - else: - win_length = args[1] - - if 'std' in kwargs: - std = kwargs['std'] - return paddle.audio.functional.get_window((args[0],std),win_length) - elif 'a' in kwargs: - a = kwargs['a'] - return paddle.audio.functional.get_window((args[0],a),win_length) - elif 'alpha' in kwargs: - alpha = kwargs['alpha'] - return paddle.audio.functional.get_window((args[0],alpha),win_length) - else: - return paddle.audio.functional.get_window(args[0], win_length) - """ - ) - return CODE_TEMPLATE - def generate_code(self, kwargs): - self.write_aux_code() - return "unchange" + default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) + for k in default_kwargs: + if k not in kwargs: + kwargs[k] = default_kwargs[k] + + kwargs_change = self.api_mapping.get("kwargs_change", {}) + for k in kwargs_change: + if k in kwargs: + kwargs[kwargs_change[k]] = kwargs.pop(k) + + if "std" in kwargs: + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("std"), + kwargs.pop("win_length"), + ) + if "a" in kwargs: + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("a"), + kwargs.pop("win_length"), + ) + if "alpha" in kwargs: + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("alpha"), + kwargs.pop("win_length"), + ) + else: + code = "{}({},{})".format( + self.get_paddle_api(), kwargs.pop("window"), kwargs.pop("win_length") + ) + return code class Num2TensorBinaryWithAlphaMatcher(BaseMatcher): From f155669549c9eaca12a2e3bc4cb3189b8252bd82 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 11:28:25 +0800 Subject: [PATCH 13/53] fix --- paconvert/api_mapping.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 5b1381b24..facc7199c 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14591,7 +14591,7 @@ }, "paddle_default_kwargs": { "dtype": "paddle.float64", - "window": "blackman" + "window": "'blackman'" } }, "torch.signal.windows.cosine": { @@ -14614,7 +14614,7 @@ }, "paddle_default_kwargs": 
{ "dtype": "paddle.float64", - "window": "cosine" + "window": "'cosine'" } }, "torch.signal.windows.exponential": { @@ -14643,7 +14643,7 @@ }, "paddle_default_kwargs": { "dtype": "paddle.float64", - "window": "exponential" + "window": "'exponential'" } }, "torch.signal.windows.gaussian": { @@ -14667,7 +14667,7 @@ }, "paddle_default_kwargs": { "dtype": "paddle.float64", - "window": "gaussian" + "window": "'gaussian'" } }, "torch.signal.windows.general_cosine": { @@ -14691,7 +14691,7 @@ }, "paddle_default_kwargs": { "dtype": "paddle.float64", - "window": "general_cosine" + "window": "'general_cosine'" } }, "torch.signal.windows.general_hamming": { @@ -14715,7 +14715,7 @@ }, "paddle_default_kwargs": { "dtype": "paddle.float64", - "window": "general_hamming" + "window": "'general_hamming'" } }, "torch.signal.windows.hamming": { @@ -14738,7 +14738,7 @@ }, "paddle_default_kwargs": { "dtype": "paddle.float64", - "window": "hamming" + "window": "'hamming'" } }, "torch.signal.windows.hann": { @@ -14761,7 +14761,7 @@ }, "paddle_default_kwargs": { "dtype": "paddle.float64", - "window": "hann" + "window": "'hann'" } }, "torch.signbit": { From 7c8108fd5bed8b8623d68bdca379640f0ea9da37 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 12:14:09 +0800 Subject: [PATCH 14/53] test blackman --- paconvert/api_mapping.json | 2 +- paconvert/api_matcher.py | 77 +++++++++++++++++++++++++------------- 2 files changed, 52 insertions(+), 27 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index facc7199c..9510200d9 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14572,7 +14572,7 @@ } }, "torch.signal.windows.blackman": { - "Matcher": "WindowsMatcher", + "Matcher": "BlackmanWindowMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 94a0e0674..c5440a8d4 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -519,6 +519,21 @@ def generate_code(self, kwargs): return super().generate_code(kwargs) +class BlackmanWindowMatcher(BaseMatcher): + def generate_code(self, kwargs): + default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) + for k in default_kwargs: + if k not in kwargs: + kwargs[k] = default_kwargs[k] + + kwargs_change = self.api_mapping.get("kwargs_change", {}) + for k in kwargs_change: + if k in kwargs: + kwargs[kwargs_change[k]] = kwargs.pop(k) + + return GenericMatcher.generate_code(self, kwargs) + + class WindowsMatcher(BaseMatcher): def generate_code(self, kwargs): default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) @@ -531,32 +546,42 @@ def generate_code(self, kwargs): if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) - if "std" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("std"), - kwargs.pop("win_length"), - ) - if "a" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("a"), - kwargs.pop("win_length"), - ) - if "alpha" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("alpha"), - kwargs.pop("win_length"), - ) - else: - code = "{}({},{})".format( - self.get_paddle_api(), kwargs.pop("window"), kwargs.pop("win_length") - ) - return code + if "std" in kwargs: + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("std"), + kwargs.pop("win_length"), + ) + if "a" 
in kwargs: + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("a"), + kwargs.pop("win_length"), + ) + if "alpha" in kwargs: + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("alpha"), + kwargs.pop("win_length"), + ) + if "tau" in kwargs: + code = "{}(({},{}),{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("tau"), + kwargs.pop("win_length"), + ) + else: + code = "{}({},{})".format( + self.get_paddle_api(), + kwargs.pop("window"), + kwargs.pop("win_length"), + ) + return code + return GenericMatcher.generate_code(self, kwargs) class Num2TensorBinaryWithAlphaMatcher(BaseMatcher): From 563a1be80863435f64ff054fffe0c7eb1dc49e04 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 13:10:17 +0800 Subject: [PATCH 15/53] fix --- paconvert/api_mapping.json | 8 ++--- paconvert/api_matcher.py | 68 +++++++++++++++++--------------------- 2 files changed, 35 insertions(+), 41 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 9510200d9..0b00b5eaf 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14572,7 +14572,7 @@ } }, "torch.signal.windows.blackman": { - "Matcher": "BlackmanWindowMatcher", + "Matcher": "RegularWindowMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14595,7 +14595,7 @@ } }, "torch.signal.windows.cosine": { - "Matcher": "WindowsMatcher", + "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14719,7 +14719,7 @@ } }, "torch.signal.windows.hamming": { - "Matcher": "WindowsMatcher", + "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14742,7 +14742,7 @@ } }, "torch.signal.windows.hann": { - "Matcher": "WindowsMatcher", + "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index c5440a8d4..68b312ce7 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -519,22 +519,17 @@ def generate_code(self, kwargs): return super().generate_code(kwargs) -class BlackmanWindowMatcher(BaseMatcher): +class RegularWindowMatcher(BaseMatcher): def generate_code(self, kwargs): default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) for k in default_kwargs: if k not in kwargs: kwargs[k] = default_kwargs[k] - kwargs_change = self.api_mapping.get("kwargs_change", {}) - for k in kwargs_change: - if k in kwargs: - kwargs[kwargs_change[k]] = kwargs.pop(k) - return GenericMatcher.generate_code(self, kwargs) -class WindowsMatcher(BaseMatcher): +class SpecialWindowsMatcher(BaseMatcher): def generate_code(self, kwargs): default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) for k in default_kwargs: @@ -546,40 +541,39 @@ def generate_code(self, kwargs): if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) + new_kwargs = {} + if kwargs["window"] == '"""exponential"""': + if "tau" in kwargs: + new_kwargs["p_x"] = kwargs.pop("tau") + else: + new_kwargs["p_x"] = 1.0 + elif kwargs["window"] == '"""gaussian"""': if "std" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("std"), - kwargs.pop("win_length"), - ) + new_kwargs["p_x"] = kwargs.pop("std") + else: + new_kwargs["p_x"] = 1.0 + elif kwargs["window"] == 
'"""general_cosine"""': if "a" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("a"), - kwargs.pop("win_length"), - ) + new_kwargs["p_x"] = kwargs.pop("a") + else: + new_kwargs["p_x"] = [0.46, 0.23, 0.31] + elif kwargs["window"] == '"""general_hamming"""': if "alpha" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("alpha"), - kwargs.pop("win_length"), - ) - if "tau" in kwargs: - code = "{}(({},{}),{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("tau"), - kwargs.pop("win_length"), - ) + new_kwargs["p_x"] = kwargs.pop("alpha") else: - code = "{}({},{})".format( - self.get_paddle_api(), - kwargs.pop("window"), - kwargs.pop("win_length"), - ) + new_kwargs["p_x"] = 0.54 + + API_TEMPLATE = textwrap.dedent( + """ + {}(({}, {}),{}) + """ + ) + code = API_TEMPLATE.format( + self.get_paddle_api(), + kwargs.pop("window"), + new_kwargs["p_x"], + kwargs.pop("win_length"), + ) return code return GenericMatcher.generate_code(self, kwargs) From 4aee5c140d2464dee101c8dd130c52855becbf2b Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 13:16:58 +0800 Subject: [PATCH 16/53] fix --- paconvert/api_mapping.json | 10 +++++----- paconvert/api_matcher.py | 10 ---------- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 0b00b5eaf..72a672937 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14572,7 +14572,7 @@ } }, "torch.signal.windows.blackman": { - "Matcher": "RegularWindowMatcher", + "Matcher": "GenericMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14618,7 +14618,7 @@ } }, "torch.signal.windows.exponential": { - "Matcher": "WindowsMatcher", + "Matcher": "SpecialWindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14647,7 +14647,7 @@ } }, "torch.signal.windows.gaussian": { - "Matcher": "WindowsMatcher", + "Matcher": "SpecialWindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14671,7 +14671,7 @@ } }, "torch.signal.windows.general_cosine": { - "Matcher": "WindowsMatcher", + "Matcher": "SpecialWindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 2, "args_list": [ @@ -14695,7 +14695,7 @@ } }, "torch.signal.windows.general_hamming": { - "Matcher": "WindowsMatcher", + "Matcher": "SpecialWindowsMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 68b312ce7..848d87db5 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -519,16 +519,6 @@ def generate_code(self, kwargs): return super().generate_code(kwargs) -class RegularWindowMatcher(BaseMatcher): - def generate_code(self, kwargs): - default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) - for k in default_kwargs: - if k not in kwargs: - kwargs[k] = default_kwargs[k] - - return GenericMatcher.generate_code(self, kwargs) - - class SpecialWindowsMatcher(BaseMatcher): def generate_code(self, kwargs): default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) From f40a61e28d2a39686a5aec4d325fbe707adbe054 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 13:32:36 +0800 Subject: [PATCH 17/53] fix --- paconvert/api_matcher.py | 28 ++++++++-------------------- 1 
file changed, 8 insertions(+), 20 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 848d87db5..5b81aef2c 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -531,28 +531,16 @@ def generate_code(self, kwargs): if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) - new_kwargs = {} if kwargs["window"] == '"""exponential"""': - if "tau" in kwargs: - new_kwargs["p_x"] = kwargs.pop("tau") - else: - new_kwargs["p_x"] = 1.0 + p_x = kwargs.pop("tau") if "tau" in kwargs else 1.0 elif kwargs["window"] == '"""gaussian"""': - if "std" in kwargs: - new_kwargs["p_x"] = kwargs.pop("std") - else: - new_kwargs["p_x"] = 1.0 + p_x = kwargs.pop("std") if "std" in kwargs else 1.0 elif kwargs["window"] == '"""general_cosine"""': - if "a" in kwargs: - new_kwargs["p_x"] = kwargs.pop("a") - else: - new_kwargs["p_x"] = [0.46, 0.23, 0.31] + p_x = kwargs.pop("a") if "a" in kwargs else [0.46, 0.23, 0.31] elif kwargs["window"] == '"""general_hamming"""': - if "alpha" in kwargs: - new_kwargs["p_x"] = kwargs.pop("alpha") - else: - new_kwargs["p_x"] = 0.54 + p_x = kwargs.pop("alpha") if "alpha" in kwargs else 0.54 + if p_x: API_TEMPLATE = textwrap.dedent( """ {}(({}, {}),{}) @@ -560,9 +548,9 @@ def generate_code(self, kwargs): ) code = API_TEMPLATE.format( self.get_paddle_api(), - kwargs.pop("window"), - new_kwargs["p_x"], - kwargs.pop("win_length"), + kwargs["window"], + p_x, + kwargs["win_length"], ) return code return GenericMatcher.generate_code(self, kwargs) From ecd7677bde11f9d99f8933e193679e550f29a645 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 13:52:05 +0800 Subject: [PATCH 18/53] fix --- paconvert/api_matcher.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 5b81aef2c..b46d39e53 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -530,9 +530,13 @@ def generate_code(self, kwargs): for k in kwargs_change: if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) - + p_x = None if kwargs["window"] == '"""exponential"""': - p_x = kwargs.pop("tau") if "tau" in kwargs else 1.0 + if "tau" in kwargs: + p_x = kwargs.pop("tau") + print(p_x) + else: + p_x = 1.0 elif kwargs["window"] == '"""gaussian"""': p_x = kwargs.pop("std") if "std" in kwargs else 1.0 elif kwargs["window"] == '"""general_cosine"""': From 6b227460aabda35f8e329f53a875f0cc876bad2a Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 14:19:08 +0800 Subject: [PATCH 19/53] fix --- paconvert/api_matcher.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index b46d39e53..384d19faa 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -530,33 +530,33 @@ def generate_code(self, kwargs): for k in kwargs_change: if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) - p_x = None + if kwargs["window"] == '"""exponential"""': if "tau" in kwargs: p_x = kwargs.pop("tau") - print(p_x) + kwargs["window"] = (kwargs.pop("window"), p_x) else: - p_x = 1.0 + kwargs["window"] = (kwargs.pop("window"), 1.0) elif kwargs["window"] == '"""gaussian"""': - p_x = kwargs.pop("std") if "std" in kwargs else 1.0 + if "std" in kwargs: + p_x = kwargs.pop("std") + kwargs["window"] = (kwargs.pop("window"), p_x) + else: + kwargs["window"] = (kwargs.pop("window"), 1.0) elif kwargs["window"] == '"""general_cosine"""': - p_x = kwargs.pop("a") if "a" in kwargs else [0.46, 0.23, 0.31] + 
if "a" in kwargs: + p_x = kwargs.pop("a") + kwargs["window"] = (kwargs.pop("window"), p_x) + else: + temp = [0.46, 0.23, 0.31] + kwargs["window"] = (kwargs.pop("window"), temp) elif kwargs["window"] == '"""general_hamming"""': - p_x = kwargs.pop("alpha") if "alpha" in kwargs else 0.54 + if "alpha" in kwargs: + p_x = kwargs.pop("alpha") + kwargs["window"] = (kwargs.pop("window"), p_x) + else: + kwargs["window"] = (kwargs.pop("window"), 0.54) - if p_x: - API_TEMPLATE = textwrap.dedent( - """ - {}(({}, {}),{}) - """ - ) - code = API_TEMPLATE.format( - self.get_paddle_api(), - kwargs["window"], - p_x, - kwargs["win_length"], - ) - return code return GenericMatcher.generate_code(self, kwargs) From 5aca4f5ce1671b12ebbb7b55fce302f2c1862ab9 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 14:45:24 +0800 Subject: [PATCH 20/53] test --- paconvert/api_mapping.json | 5 ++--- paconvert/api_matcher.py | 41 ++++++++++++++++++++++---------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 72a672937..84e07f54e 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14618,7 +14618,7 @@ } }, "torch.signal.windows.exponential": { - "Matcher": "SpecialWindowsMatcher", + "Matcher": "ExponentialMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14642,8 +14642,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", - "window": "'exponential'" + "dtype": "paddle.float64" } }, "torch.signal.windows.gaussian": { diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 384d19faa..5b39bd4dd 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -519,6 +519,21 @@ def generate_code(self, kwargs): return super().generate_code(kwargs) +class ExponentialMatcher(BaseMatcher): + def generate_code(self, kwargs): + kwargs_change = self.api_mapping.get("kwargs_change", {}) + for k in kwargs_change: + if k in kwargs: + kwargs[kwargs_change[k]] = kwargs.pop(k) + new_kwargs = {} + if "tau" in kwargs: + new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("tau")) + else: + new_kwargs["window"] = (kwargs.pop("window"), 1.0) + new_kwargs.update(kwargs) + return GenericMatcher.generate_code(self, new_kwargs) + + class SpecialWindowsMatcher(BaseMatcher): def generate_code(self, kwargs): default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) @@ -531,32 +546,24 @@ def generate_code(self, kwargs): if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) - if kwargs["window"] == '"""exponential"""': - if "tau" in kwargs: - p_x = kwargs.pop("tau") - kwargs["window"] = (kwargs.pop("window"), p_x) - else: - kwargs["window"] = (kwargs.pop("window"), 1.0) - elif kwargs["window"] == '"""gaussian"""': + new_kwargs = {} + if kwargs["window"] == '"""gaussian"""': if "std" in kwargs: - p_x = kwargs.pop("std") - kwargs["window"] = (kwargs.pop("window"), p_x) + new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("std")) else: - kwargs["window"] = (kwargs.pop("window"), 1.0) + new_kwargs["window"] = (kwargs.pop("window"), 1.0) elif kwargs["window"] == '"""general_cosine"""': if "a" in kwargs: - p_x = kwargs.pop("a") - kwargs["window"] = (kwargs.pop("window"), p_x) + new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("a")) else: temp = [0.46, 0.23, 0.31] - kwargs["window"] = (kwargs.pop("window"), temp) + new_kwargs["window"] = (kwargs.pop("window"), temp) elif kwargs["window"] == '"""general_hamming"""': 
if "alpha" in kwargs: - p_x = kwargs.pop("alpha") - kwargs["window"] = (kwargs.pop("window"), p_x) + new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("alpha")) else: - kwargs["window"] = (kwargs.pop("window"), 0.54) - + new_kwargs["window"] = (kwargs.pop("window"), 0.54) + new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, kwargs) From 4d9440dad45040d449014e0d7dd4c008b68c6355 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 15:03:18 +0800 Subject: [PATCH 21/53] fix --- paconvert/api_mapping.json | 15 ++++------ paconvert/api_matcher.py | 60 +++++++++++++++++++++++--------------- 2 files changed, 42 insertions(+), 33 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 84e07f54e..95dc4109c 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14646,7 +14646,7 @@ } }, "torch.signal.windows.gaussian": { - "Matcher": "SpecialWindowsMatcher", + "Matcher": "GaussianMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14665,12 +14665,11 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", - "window": "'gaussian'" + "dtype": "paddle.float64" } }, "torch.signal.windows.general_cosine": { - "Matcher": "SpecialWindowsMatcher", + "Matcher": "GeneralCosineMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 2, "args_list": [ @@ -14689,12 +14688,11 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", - "window": "'general_cosine'" + "dtype": "paddle.float64" } }, "torch.signal.windows.general_hamming": { - "Matcher": "SpecialWindowsMatcher", + "Matcher": "GeneralHammingMatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14713,8 +14711,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", - "window": "'general_hamming'" + "dtype": "paddle.float64" } }, "torch.signal.windows.hamming": { diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 5b39bd4dd..8bd940a8a 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -527,44 +527,56 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "tau" in kwargs: - new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("tau")) + new_kwargs["window"] = ("'exponential'", kwargs.pop("tau")) else: - new_kwargs["window"] = (kwargs.pop("window"), 1.0) + new_kwargs["window"] = ("'exponential'", 1.0) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) -class SpecialWindowsMatcher(BaseMatcher): +class GaussianMatcher(BaseMatcher): def generate_code(self, kwargs): - default_kwargs = self.api_mapping.get("paddle_default_kwargs", {}) - for k in default_kwargs: - if k not in kwargs: - kwargs[k] = default_kwargs[k] + kwargs_change = self.api_mapping.get("kwargs_change", {}) + for k in kwargs_change: + if k in kwargs: + kwargs[kwargs_change[k]] = kwargs.pop(k) + new_kwargs = {} + if "std" in kwargs: + new_kwargs["window"] = ("'gaussian'", kwargs.pop("std")) + else: + new_kwargs["window"] = ("'gaussian'", 1.0) + new_kwargs.update(kwargs) + return GenericMatcher.generate_code(self, new_kwargs) + +class GeneralCosineMatcher(BaseMatcher): + def generate_code(self, kwargs): kwargs_change = self.api_mapping.get("kwargs_change", {}) for k in kwargs_change: if k in kwargs: kwargs[kwargs_change[k]] = kwargs.pop(k) + new_kwargs = {} + if "a" in kwargs: + new_kwargs["window"] = 
("'general_cosine'", kwargs.pop("a")) + else: + new_kwargs["window"] = ("'general_cosine'", [0.46, 0.23, 0.31]) + new_kwargs.update(kwargs) + return GenericMatcher.generate_code(self, new_kwargs) + +class GeneralHammingMatcher(BaseMatcher): + def generate_code(self, kwargs): + kwargs_change = self.api_mapping.get("kwargs_change", {}) + for k in kwargs_change: + if k in kwargs: + kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} - if kwargs["window"] == '"""gaussian"""': - if "std" in kwargs: - new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("std")) - else: - new_kwargs["window"] = (kwargs.pop("window"), 1.0) - elif kwargs["window"] == '"""general_cosine"""': - if "a" in kwargs: - new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("a")) - else: - temp = [0.46, 0.23, 0.31] - new_kwargs["window"] = (kwargs.pop("window"), temp) - elif kwargs["window"] == '"""general_hamming"""': - if "alpha" in kwargs: - new_kwargs["window"] = (kwargs.pop("window"), kwargs.pop("alpha")) - else: - new_kwargs["window"] = (kwargs.pop("window"), 0.54) + if "alpha" in kwargs: + new_kwargs["window"] = ("'general_hamming',", kwargs.pop("alpha")) + else: + new_kwargs["window"] = ("'general_hamming',", 0.54) new_kwargs.update(kwargs) - return GenericMatcher.generate_code(self, kwargs) + return GenericMatcher.generate_code(self, new_kwargs) class Num2TensorBinaryWithAlphaMatcher(BaseMatcher): From fb27f135d6ccce05dd1d97cd38b37a1183d9e4a7 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 15:42:49 +0800 Subject: [PATCH 22/53] fix --- paconvert/api_matcher.py | 22 ++++++++++++++-------- tests/test_signal_windows_blackman.py | 6 +++--- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 8bd940a8a..f1fd8b891 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -527,9 +527,12 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "tau" in kwargs: - new_kwargs["window"] = ("'exponential'", kwargs.pop("tau")) + tau_value = float(str(kwargs.pop("tau")).split("=")[-1]) + print(tau_value) + new_kwargs["window"] = ("exponential", tau_value) + print(new_kwargs["window"]) else: - new_kwargs["window"] = ("'exponential'", 1.0) + new_kwargs["window"] = ("exponential", 1.0) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) @@ -542,9 +545,10 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "std" in kwargs: - new_kwargs["window"] = ("'gaussian'", kwargs.pop("std")) + std_value = float(str(kwargs.pop("std")).split("=")[-1]) + new_kwargs["window"] = tuple("gaussian", std_value) else: - new_kwargs["window"] = ("'gaussian'", 1.0) + new_kwargs["window"] = tuple("gaussian", 1.0) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) @@ -557,9 +561,10 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "a" in kwargs: - new_kwargs["window"] = ("'general_cosine'", kwargs.pop("a")) + a_value = str(kwargs.pop("a")).split("=")[-1] + new_kwargs["window"] = ("general_cosine", a_value) else: - new_kwargs["window"] = ("'general_cosine'", [0.46, 0.23, 0.31]) + new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) @@ -572,9 +577,10 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "alpha" in kwargs: - new_kwargs["window"] 
= ("'general_hamming',", kwargs.pop("alpha")) + alpha_value = float(str(kwargs.pop("alpha")).split("=")[-1]) + new_kwargs["window"] = tuple("general_hamming", alpha_value) else: - new_kwargs["window"] = ("'general_hamming',", 0.54) + new_kwargs["window"] = tuple("general_hamming", 0.54) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index 616f1f0d0..8ad0c8fa6 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -22,9 +22,9 @@ def test_case_1(): pytorch_code = textwrap.dedent( """ - import torch - result = torch.signal.windows.blackman(5) - """ +import torch +result = torch.signal.windows.blackman(5) +""" ) obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) From 854bd4cab9eed455ad6c88ff5d7e325df2143aab Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 16:14:46 +0800 Subject: [PATCH 23/53] fix --- paconvert/api_matcher.py | 18 ++++++++---------- tests/test_signal_windows_exponential.py | 18 ++++-------------- tests/test_signal_windows_gaussian.py | 10 ---------- tests/test_signal_windows_general_cosine.py | 10 ---------- tests/test_signal_windows_general_hamming.py | 10 ---------- 5 files changed, 12 insertions(+), 54 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index f1fd8b891..6c0adc1ea 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -527,10 +527,8 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "tau" in kwargs: - tau_value = float(str(kwargs.pop("tau")).split("=")[-1]) - print(tau_value) + tau_value = kwargs.pop("tau").split("=")[-1] new_kwargs["window"] = ("exponential", tau_value) - print(new_kwargs["window"]) else: new_kwargs["window"] = ("exponential", 1.0) new_kwargs.update(kwargs) @@ -545,10 +543,10 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "std" in kwargs: - std_value = float(str(kwargs.pop("std")).split("=")[-1]) - new_kwargs["window"] = tuple("gaussian", std_value) + std_value = kwargs.pop("std").split("=")[-1] + new_kwargs["window"] = ("gaussian", std_value) else: - new_kwargs["window"] = tuple("gaussian", 1.0) + new_kwargs["window"] = ("gaussian", 1.0) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) @@ -561,7 +559,7 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "a" in kwargs: - a_value = str(kwargs.pop("a")).split("=")[-1] + a_value = kwargs.pop("a").split("=")[-1] new_kwargs["window"] = ("general_cosine", a_value) else: new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) @@ -577,10 +575,10 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "alpha" in kwargs: - alpha_value = float(str(kwargs.pop("alpha")).split("=")[-1]) - new_kwargs["window"] = tuple("general_hamming", alpha_value) + alpha_value = kwargs.pop("alpha").split("=")[-1] + new_kwargs["window"] = ("general_hamming", alpha_value) else: - new_kwargs["window"] = tuple("general_hamming", 0.54) + new_kwargs["window"] = ("general_hamming", 0.54) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 6df175233..139e025fc 100644 --- a/tests/test_signal_windows_exponential.py +++ 
b/tests/test_signal_windows_exponential.py @@ -40,16 +40,6 @@ def test_case_2(): def test_case_3(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.exponential(10, tau=.5, dtype=torch.float64) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_4(): pytorch_code = textwrap.dedent( """ import torch @@ -59,7 +49,7 @@ def test_case_4(): obj.run(pytorch_code, ["result"], check_value=False) -def test_case_5(): +def test_case_4(): pytorch_code = textwrap.dedent( """ import torch @@ -69,7 +59,7 @@ def test_case_5(): obj.run(pytorch_code, ["result"], check_value=False) -def test_case_6(): +def test_case_5(): pytorch_code = textwrap.dedent( """ import torch @@ -79,11 +69,11 @@ def test_case_6(): obj.run(pytorch_code, ["result"], check_value=False) -def test_case_7(): +def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, sym=False, dtype=torch.float64) + result = torch.signal.windows.exponential(10, tau=0.5, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index c5ce421c1..5b8392061 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -77,13 +77,3 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_7(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.gaussian(10, std=0.9, sym=False, dtype=torch.float64) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index ba42fbb21..2e03935b1 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -77,13 +77,3 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"]) - - -def test_case_7(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) - """ - ) - obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index 88d5eeeaf..fd5fbbd5a 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -77,13 +77,3 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_7(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.general_hamming(10, alpha=0.5, sym=False, dtype=torch.float64) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) From 6c0d0040627168182d29c9f6da38fb731258f655 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 17:04:04 +0800 Subject: [PATCH 24/53] fix --- paconvert/api_matcher.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 6c0adc1ea..eb5df4c80 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -527,7 +527,7 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "tau" in kwargs: - tau_value = kwargs.pop("tau").split("=")[-1] + tau_value = float(str(kwargs.pop("tau")).split("=")[-1].strip("()")) new_kwargs["window"] = ("exponential", tau_value) else: new_kwargs["window"] = ("exponential", 1.0) @@ -543,8 +543,10 
@@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "std" in kwargs: - std_value = kwargs.pop("std").split("=")[-1] + std_value = float(str(kwargs.pop("std")).split("=")[-1].strip("()")) new_kwargs["window"] = ("gaussian", std_value) + print("--------std-----------", std_value) + print("--------std-----------", type(std_value)) else: new_kwargs["window"] = ("gaussian", 1.0) new_kwargs.update(kwargs) @@ -560,6 +562,8 @@ def generate_code(self, kwargs): new_kwargs = {} if "a" in kwargs: a_value = kwargs.pop("a").split("=")[-1] + print("--------a_value-----------", a_value) + print("--------a_value-----------", type(a_value)) new_kwargs["window"] = ("general_cosine", a_value) else: new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) @@ -575,7 +579,7 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "alpha" in kwargs: - alpha_value = kwargs.pop("alpha").split("=")[-1] + alpha_value = float(str(kwargs.pop("alpha")).split("=")[-1].strip("()")) new_kwargs["window"] = ("general_hamming", alpha_value) else: new_kwargs["window"] = ("general_hamming", 0.54) From 2b56f1ae392fb030920643dd27878e9aef7a89a7 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 17:13:25 +0800 Subject: [PATCH 25/53] fix --- paconvert/api_matcher.py | 6 +----- tests/test_signal_windows_exponential.py | 10 ---------- 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index eb5df4c80..e508b732d 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -545,8 +545,6 @@ def generate_code(self, kwargs): if "std" in kwargs: std_value = float(str(kwargs.pop("std")).split("=")[-1].strip("()")) new_kwargs["window"] = ("gaussian", std_value) - print("--------std-----------", std_value) - print("--------std-----------", type(std_value)) else: new_kwargs["window"] = ("gaussian", 1.0) new_kwargs.update(kwargs) @@ -561,9 +559,7 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "a" in kwargs: - a_value = kwargs.pop("a").split("=")[-1] - print("--------a_value-----------", a_value) - print("--------a_value-----------", type(a_value)) + a_value = str(kwargs.pop("a")).split("=")[-1].strip("()") new_kwargs["window"] = ("general_cosine", a_value) else: new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 139e025fc..9fff3e748 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -67,13 +67,3 @@ def test_case_5(): """ ) obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_6(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.exponential(10, tau=0.5, dtype=torch.float64) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) From 3761ca976aa6b6b180a90272882cb1cc3e1fa704 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 26 Sep 2024 17:27:37 +0800 Subject: [PATCH 26/53] fix --- paconvert/api_matcher.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index e508b732d..aac3b2b4b 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -559,7 +559,8 @@ def generate_code(self, kwargs): kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "a" in kwargs: - a_value = str(kwargs.pop("a")).split("=")[-1].strip("()") 
+ temp_value = str(kwargs.pop("a")).split("=")[-1].strip("()") + a_value = ast.literal_eval(temp_value) new_kwargs["window"] = ("general_cosine", a_value) else: new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) From 51f6f81ccc953ffdf2bcb33486568e5ed50f84af Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 09:27:51 +0800 Subject: [PATCH 27/53] fix --- paconvert/api_mapping.json | 3 +-- paconvert/api_matcher.py | 4 ---- tests/test_signal_windows_blackman.py | 6 +++--- tests/test_signal_windows_exponential.py | 10 ++++++++++ 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 95dc4109c..d54104661 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14633,8 +14633,7 @@ "requires_grad" ], "unsupport_args": [ - "center", - "tau" + "center" ], "kwargs_change": { "M": "win_length", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index aac3b2b4b..b8b61724f 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -521,10 +521,6 @@ def generate_code(self, kwargs): class ExponentialMatcher(BaseMatcher): def generate_code(self, kwargs): - kwargs_change = self.api_mapping.get("kwargs_change", {}) - for k in kwargs_change: - if k in kwargs: - kwargs[kwargs_change[k]] = kwargs.pop(k) new_kwargs = {} if "tau" in kwargs: tau_value = float(str(kwargs.pop("tau")).split("=")[-1].strip("()")) diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index 8ad0c8fa6..616f1f0d0 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -22,9 +22,9 @@ def test_case_1(): pytorch_code = textwrap.dedent( """ -import torch -result = torch.signal.windows.blackman(5) -""" + import torch + result = torch.signal.windows.blackman(5) + """ ) obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 9fff3e748..139e025fc 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -67,3 +67,13 @@ def test_case_5(): """ ) obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) From c66d5ede2f98636957e4971ebc67ab4ebad20a53 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 10:11:23 +0800 Subject: [PATCH 28/53] fix --- paconvert/api_matcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index b8b61724f..8afc7965a 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -556,7 +556,7 @@ def generate_code(self, kwargs): new_kwargs = {} if "a" in kwargs: temp_value = str(kwargs.pop("a")).split("=")[-1].strip("()") - a_value = ast.literal_eval(temp_value) + a_value = "list[{}]".format(astor.to_source(temp_value).strip("\n")) new_kwargs["window"] = ("general_cosine", a_value) else: new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) From 6eef5831528a460f9f79ca433360256b8ef02590 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 11:02:43 +0800 Subject: [PATCH 29/53] fix --- paconvert/api_mapping.json | 6 +-- paconvert/api_matcher.py | 79 ++++++++++++++------------------------ 2 files changed, 32 insertions(+), 53 
deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index d54104661..951bc122b 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14618,7 +14618,7 @@ } }, "torch.signal.windows.exponential": { - "Matcher": "ExponentialMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14645,7 +14645,7 @@ } }, "torch.signal.windows.gaussian": { - "Matcher": "GaussianMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14691,7 +14691,7 @@ } }, "torch.signal.windows.general_hamming": { - "Matcher": "GeneralHammingMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 8afc7965a..67024e6e7 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -519,65 +519,44 @@ def generate_code(self, kwargs): return super().generate_code(kwargs) -class ExponentialMatcher(BaseMatcher): +class SignalWindowsWatcher(BaseMatcher): def generate_code(self, kwargs): new_kwargs = {} - if "tau" in kwargs: - tau_value = float(str(kwargs.pop("tau")).split("=")[-1].strip("()")) - new_kwargs["window"] = ("exponential", tau_value) - else: - new_kwargs["window"] = ("exponential", 1.0) - new_kwargs.update(kwargs) - return GenericMatcher.generate_code(self, new_kwargs) - - -class GaussianMatcher(BaseMatcher): - def generate_code(self, kwargs): - kwargs_change = self.api_mapping.get("kwargs_change", {}) - for k in kwargs_change: - if k in kwargs: - kwargs[kwargs_change[k]] = kwargs.pop(k) - new_kwargs = {} - if "std" in kwargs: - std_value = float(str(kwargs.pop("std")).split("=")[-1].strip("()")) - new_kwargs["window"] = ("gaussian", std_value) - else: - new_kwargs["window"] = ("gaussian", 1.0) + if "exponential" in self.torch_api: + if "tau" in kwargs: + tau_value = float(str(kwargs.pop("tau")).split("=")[-1].strip("()")) + new_kwargs["window"] = ("exponential", tau_value) + else: + new_kwargs["window"] = ("exponential", 1.0) + if "gaussian" in self.torch_api: + if "std" in kwargs: + std_value = float(str(kwargs.pop("std")).split("=")[-1].strip("()")) + new_kwargs["window"] = ("gaussian", std_value) + else: + new_kwargs["window"] = ("gaussian", 1.0) + if "general_hamming" in self.torch_api: + if "alpha" in kwargs: + alpha_value = float(str(kwargs.pop("alpha")).split("=")[-1].strip("()")) + new_kwargs["window"] = ("general_hamming", alpha_value) + else: + new_kwargs["window"] = ("general_hamming", 0.54) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) class GeneralCosineMatcher(BaseMatcher): - def generate_code(self, kwargs): - kwargs_change = self.api_mapping.get("kwargs_change", {}) - for k in kwargs_change: - if k in kwargs: - kwargs[kwargs_change[k]] = kwargs.pop(k) - new_kwargs = {} + def get_paddle_nodes(self, args, kwargs): + kwargs = self.parse_kwargs(kwargs) + if kwargs is None: + return None if "a" in kwargs: - temp_value = str(kwargs.pop("a")).split("=")[-1].strip("()") - a_value = "list[{}]".format(astor.to_source(temp_value).strip("\n")) - new_kwargs["window"] = ("general_cosine", a_value) - else: - new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) - new_kwargs.update(kwargs) - return GenericMatcher.generate_code(self, new_kwargs) - - -class GeneralHammingMatcher(BaseMatcher): - def 
generate_code(self, kwargs): - kwargs_change = self.api_mapping.get("kwargs_change", {}) - for k in kwargs_change: - if k in kwargs: - kwargs[kwargs_change[k]] = kwargs.pop(k) - new_kwargs = {} - if "alpha" in kwargs: - alpha_value = float(str(kwargs.pop("alpha")).split("=")[-1].strip("()")) - new_kwargs["window"] = ("general_hamming", alpha_value) + a_value = "list[{}]".format(astor.to_source(args[1].value).strip("\n")) + gc_kwargs = "tuple({},{})".format("general_cosine", a_value) + kwargs = {"window": gc_kwargs, **kwargs} else: - new_kwargs["window"] = ("general_hamming", 0.54) - new_kwargs.update(kwargs) - return GenericMatcher.generate_code(self, new_kwargs) + kwargs = {"window": ("general_cosine", [0.46, 0.23, 0.31])} + code = GenericMatcher.generate_code(self, kwargs) + return ast.parse(code).body class Num2TensorBinaryWithAlphaMatcher(BaseMatcher): From 476edb3fbc31616d91ca90a3c816cad2a75eca23 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 11:18:45 +0800 Subject: [PATCH 30/53] fix --- paconvert/api_matcher.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 67024e6e7..8cee08f22 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -550,7 +550,10 @@ def get_paddle_nodes(self, args, kwargs): if kwargs is None: return None if "a" in kwargs: - a_value = "list[{}]".format(astor.to_source(args[1].value).strip("\n")) + a_value = "list[{}]".format(astor.to_source(args[0].value).strip("\n")) + print("------------", a_value) + temp_value = str(kwargs.pop("a")).split("=")[-1].strip("()") + print("------------", temp_value) gc_kwargs = "tuple({},{})".format("general_cosine", a_value) kwargs = {"window": gc_kwargs, **kwargs} else: From 9c2aa0032ba3ea21b800a3d0d918e3a2178efd33 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 11:44:36 +0800 Subject: [PATCH 31/53] fix --- paconvert/api_matcher.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 8cee08f22..1d389462d 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -550,11 +550,17 @@ def get_paddle_nodes(self, args, kwargs): if kwargs is None: return None if "a" in kwargs: - a_value = "list[{}]".format(astor.to_source(args[0].value).strip("\n")) - print("------------", a_value) - temp_value = str(kwargs.pop("a")).split("=")[-1].strip("()") - print("------------", temp_value) - gc_kwargs = "tuple({},{})".format("general_cosine", a_value) + test_value = self.parse_args(args) + print("------test_value------", test_value) + test_value1 = self.parse_args(args)[1] + print("------test_value------", test_value1) + a_value = astor.to_source(args[1].value).strip("\n") + print("------a_value------", a_value) + temp_value = str(kwargs.pop("a")).split("=")[-1] + print("------temp_value------", temp_value) + temp_value1 = ast.literal_eval(temp_value) + print("------a_value------", temp_value1) + gc_kwargs = "tuple({},{})".format("general_cosine", temp_value1) kwargs = {"window": gc_kwargs, **kwargs} else: kwargs = {"window": ("general_cosine", [0.46, 0.23, 0.31])} From 67972d817db8b26c5b59a484e103d1c19d830950 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 12:08:58 +0800 Subject: [PATCH 32/53] fix --- paconvert/api_mapping.json | 2 +- paconvert/api_matcher.py | 30 ++++++------------------------ 2 files changed, 7 insertions(+), 25 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json 
index 951bc122b..327ae9644 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14668,7 +14668,7 @@ } }, "torch.signal.windows.general_cosine": { - "Matcher": "GeneralCosineMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 2, "args_list": [ diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 1d389462d..9ccd40103 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -540,34 +540,16 @@ def generate_code(self, kwargs): new_kwargs["window"] = ("general_hamming", alpha_value) else: new_kwargs["window"] = ("general_hamming", 0.54) + if "general_cosine" in self.torch_api: + if "a" in kwargs: + a_value = list(kwargs.values())[0] + new_kwargs["window"] = ("general_cosine", a_value) + else: + new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) -class GeneralCosineMatcher(BaseMatcher): - def get_paddle_nodes(self, args, kwargs): - kwargs = self.parse_kwargs(kwargs) - if kwargs is None: - return None - if "a" in kwargs: - test_value = self.parse_args(args) - print("------test_value------", test_value) - test_value1 = self.parse_args(args)[1] - print("------test_value------", test_value1) - a_value = astor.to_source(args[1].value).strip("\n") - print("------a_value------", a_value) - temp_value = str(kwargs.pop("a")).split("=")[-1] - print("------temp_value------", temp_value) - temp_value1 = ast.literal_eval(temp_value) - print("------a_value------", temp_value1) - gc_kwargs = "tuple({},{})".format("general_cosine", temp_value1) - kwargs = {"window": gc_kwargs, **kwargs} - else: - kwargs = {"window": ("general_cosine", [0.46, 0.23, 0.31])} - code = GenericMatcher.generate_code(self, kwargs) - return ast.parse(code).body - - class Num2TensorBinaryWithAlphaMatcher(BaseMatcher): def generate_code(self, kwargs): kwargs_change = self.api_mapping.get("kwargs_change", {}) From 466d826c25bb6807bcb3c330e9000dec0e3e89ff Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 12:18:12 +0800 Subject: [PATCH 33/53] test --- paconvert/api_matcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 9ccd40103..e5ece7b00 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -542,6 +542,7 @@ def generate_code(self, kwargs): new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: if "a" in kwargs: + print("------kwargs-------", kwargs.values()) a_value = list(kwargs.values())[0] new_kwargs["window"] = ("general_cosine", a_value) else: From 3c5e0e4a4e7c3f3e0900b79c236e831f032def1e Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 12:28:10 +0800 Subject: [PATCH 34/53] fix --- paconvert/api_matcher.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index e5ece7b00..92a9213d3 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -541,12 +541,8 @@ def generate_code(self, kwargs): else: new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: - if "a" in kwargs: - print("------kwargs-------", kwargs.values()) - a_value = list(kwargs.values())[0] - new_kwargs["window"] = ("general_cosine", a_value) - else: - new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) + a_value = [v for v in kwargs.values()][0] + new_kwargs["window"] = ("general_cosine", a_value) 
new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) From 38d7bc8d35d5669510d72fba3706cf0257ddebee Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 12:37:14 +0800 Subject: [PATCH 35/53] fix --- paconvert/api_matcher.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 92a9213d3..12f328dc1 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -541,8 +541,11 @@ def generate_code(self, kwargs): else: new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: + print("-------kwargs-------", kwargs) + print("-------kwargs-------", kwargs.values()) a_value = [v for v in kwargs.values()][0] - new_kwargs["window"] = ("general_cosine", a_value) + print("------a_value-----", a_value) + new_kwargs["window"] = ("general_cosine", list(a_value)) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) From 3ce42da600f72d3383c1d392e358a2131ecfe5f2 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 12:48:49 +0800 Subject: [PATCH 36/53] fix --- paconvert/api_mapping.json | 2 +- paconvert/api_matcher.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 327ae9644..8dd3bcd79 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14670,7 +14670,7 @@ "torch.signal.windows.general_cosine": { "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", - "min_input_args": 2, + "min_input_args": 1, "args_list": [ "M", "*", diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 12f328dc1..67ac621d0 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -541,10 +541,7 @@ def generate_code(self, kwargs): else: new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: - print("-------kwargs-------", kwargs) - print("-------kwargs-------", kwargs.values()) a_value = [v for v in kwargs.values()][0] - print("------a_value-----", a_value) new_kwargs["window"] = ("general_cosine", list(a_value)) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) From a34f023160cd09e4799f3a15536d3e2b9cf00ffb Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 12:57:05 +0800 Subject: [PATCH 37/53] fix --- paconvert/api_matcher.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 67ac621d0..ab67172c1 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -541,8 +541,12 @@ def generate_code(self, kwargs): else: new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: - a_value = [v for v in kwargs.values()][0] - new_kwargs["window"] = ("general_cosine", list(a_value)) + if "a" in kwargs: + a_value = [v for v in kwargs.values()][0] + print("----------a_value----------", a_value) + new_kwargs["window"] = ("general_cosine", a_value) + else: + new_kwargs["window"] = ("general_hamming", [0.46, 0.23, 0.31]) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) From 814e9fdfc60151063ea160b969dbd55239d28359 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 13:05:36 +0800 Subject: [PATCH 38/53] fix --- paconvert/api_matcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 
ab67172c1..aa63eedbb 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -542,11 +542,11 @@ def generate_code(self, kwargs): new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: if "a" in kwargs: - a_value = [v for v in kwargs.values()][0] + a_value = [v for v in kwargs.values()][1] print("----------a_value----------", a_value) new_kwargs["window"] = ("general_cosine", a_value) else: - new_kwargs["window"] = ("general_hamming", [0.46, 0.23, 0.31]) + new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) From d2d0a8247f5a51db7aeac1b9a2373ae4ac6badf2 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 13:22:01 +0800 Subject: [PATCH 39/53] fix --- paconvert/api_mapping.json | 1 + paconvert/api_matcher.py | 8 ++------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 8dd3bcd79..8513dbc8b 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14683,6 +14683,7 @@ ], "kwargs_change": { "M": "win_length", + "a": "", "sym": "fftbins", "dtype": "dtype" }, diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index aa63eedbb..5295b3ac9 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -541,12 +541,8 @@ def generate_code(self, kwargs): else: new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: - if "a" in kwargs: - a_value = [v for v in kwargs.values()][1] - print("----------a_value----------", a_value) - new_kwargs["window"] = ("general_cosine", a_value) - else: - new_kwargs["window"] = ("general_cosine", [0.46, 0.23, 0.31]) + a_value = [v for v in kwargs.values()][1] + new_kwargs["window"] = ("general_cosine", a_value) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) From fc5b9306c6f03c53eec130d3d187564aa67820a0 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 27 Sep 2024 13:29:50 +0800 Subject: [PATCH 40/53] fix --- tests/test_signal_windows_general_cosine.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 2e03935b1..8ba3f7d1b 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -33,10 +33,10 @@ def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64) + result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_3(): @@ -46,7 +46,7 @@ def test_case_3(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_4(): @@ -56,7 +56,7 @@ def test_case_4(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_5(): @@ -66,7 +66,7 @@ def test_case_5(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) 
+ obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): @@ -76,4 +76,4 @@ def test_case_6(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], check_value=False) From 9e213033eb7ba5a9f691189df3f3f2eab53b6b41 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 9 Oct 2024 11:22:05 +0800 Subject: [PATCH 41/53] fix --- paconvert/api_mapping.json | 16 +-- paconvert/api_matcher.py | 26 +++- tests/test_signal_windows_blackman.py | 70 ++++++++++- tests/test_signal_windows_cosine.py | 76 ++++++++++-- tests/test_signal_windows_exponential.py | 121 ++++++++++++++++++- tests/test_signal_windows_gaussian.py | 109 ++++++++++++++++- tests/test_signal_windows_general_cosine.py | 71 ++++++++++- tests/test_signal_windows_general_hamming.py | 119 +++++++++++++++++- tests/test_signal_windows_hamming.py | 59 ++++++++- tests/test_signal_windows_hann.py | 59 ++++++++- 10 files changed, 675 insertions(+), 51 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index d13af1dba..69d942e7b 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14689,7 +14689,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", + "dtype": "'float32'", "window": "'blackman'" } }, @@ -14712,7 +14712,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", + "dtype": "'float32'", "window": "'cosine'" } }, @@ -14740,7 +14740,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "'float32'" } }, "torch.signal.windows.gaussian": { @@ -14763,7 +14763,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "'float32'" } }, "torch.signal.windows.general_cosine": { @@ -14787,7 +14787,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "'float32'" } }, "torch.signal.windows.general_hamming": { @@ -14810,7 +14810,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64" + "dtype": "'float32'" } }, "torch.signal.windows.hamming": { @@ -14832,7 +14832,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", + "dtype": "'float32'", "window": "'hamming'" } }, @@ -14855,7 +14855,7 @@ "dtype": "dtype" }, "paddle_default_kwargs": { - "dtype": "paddle.float64", + "dtype": "'float32'", "window": "'hann'" } }, diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 5295b3ac9..ff9a5566a 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -523,20 +523,34 @@ class SignalWindowsWatcher(BaseMatcher): def generate_code(self, kwargs): new_kwargs = {} if "exponential" in self.torch_api: - if "tau" in kwargs: - tau_value = float(str(kwargs.pop("tau")).split("=")[-1].strip("()")) - new_kwargs["window"] = ("exponential", tau_value) + if "sym" in kwargs: + if kwargs["sym"] != "(False)": + if "tau" in kwargs: + tau_value = float(str(kwargs.pop("tau")).strip("()")) + new_kwargs["window"] = ("exponential", tau_value) + else: + new_kwargs["window"] = ("exponential", 1.0) + else: + if "tau" in kwargs: + tau_value = float(str(kwargs.pop("tau")).strip("()")) + new_kwargs["window"] = ("exponential", None, tau_value) + else: + new_kwargs["window"] = ("exponential", None, 1.0) else: - new_kwargs["window"] = ("exponential", 1.0) + if "tau" in kwargs: + tau_value = 
float(str(kwargs.pop("tau")).strip("()")) + new_kwargs["window"] = ("exponential", tau_value) + else: + new_kwargs["window"] = ("exponential", 1.0) if "gaussian" in self.torch_api: if "std" in kwargs: - std_value = float(str(kwargs.pop("std")).split("=")[-1].strip("()")) + std_value = float(str(kwargs.pop("std")).strip("()")) new_kwargs["window"] = ("gaussian", std_value) else: new_kwargs["window"] = ("gaussian", 1.0) if "general_hamming" in self.torch_api: if "alpha" in kwargs: - alpha_value = float(str(kwargs.pop("alpha")).split("=")[-1].strip("()")) + alpha_value = float(str(kwargs.pop("alpha")).strip("()")) new_kwargs["window"] = ("general_hamming", alpha_value) else: new_kwargs["window"] = ("general_hamming", 0.54) diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index 616f1f0d0..b3f825d95 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.blackman(5) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, dtype=torch.float64, requires_grad=True) + a = 5 + result = torch.signal.windows.blackman(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,8 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, requires_grad=True) + a = 5 + result = torch.signal.windows.blackman(M = a, dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,13 +65,43 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.blackman(5, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): pytorch_code = textwrap.dedent( """ import torch @@ -77,3 +109,33 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + 
+ +def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py index 4479daeb2..07040ccd2 100644 --- a/tests/test_signal_windows_cosine.py +++ b/tests/test_signal_windows_cosine.py @@ -23,17 +23,17 @@ def test_case_1(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(10) + result = torch.signal.windows.cosine(5) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(10, dtype=torch.float64) + result = torch.signal.windows.cosine(5, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(10, dtype=torch.float64, requires_grad=True) + a = 5 + result = torch.signal.windows.cosine(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,8 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + a = 5 + result = torch.signal.windows.cosine(M = a, dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,7 +65,17 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.cosine(5, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(5, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -73,7 +85,57 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(10, sym=False, dtype=torch.float64) + result = torch.signal.windows.cosine(5, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(5, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + 
+def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 139e025fc..db6a80c4a 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.exponential(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, dtype=torch.float64, requires_grad=True) + a = 10 + result = torch.signal.windows.exponential(a, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.exponential(M=10, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.exponential(10, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -73,7 +74,117 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, dtype=torch.float64) + result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + 
pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, sym=False, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, sym=False, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(10, tau=0.5, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index 5b8392061..e5799d075 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.gaussian(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64) + a=10 + result = torch.signal.windows.gaussian(a, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,13 +64,33 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): pytorch_code = textwrap.dedent( """ import torch @@ -77,3 +98,83 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, 
sym=False, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, sym=False, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.6, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.6, sym=False, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.6, sym=False, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(10, std=0.6, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 8ba3f7d1b..d3d2c78b6 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -26,14 +26,14 @@ def test_case_1(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31]) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64) + m=10 + result = torch.signal.windows.general_cosine(m, a=[0.46, 0.23, 0.31], dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.general_cosine(M=10, a=[0.46, 0.23, 0.31], dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,13 +64,23 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + 
result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): pytorch_code = textwrap.dedent( """ import torch @@ -77,3 +88,53 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index fd5fbbd5a..f0c1244e2 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.general_hamming(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.5, dtype=torch.float64) + a=10 + result = torch.signal.windows.general_hamming(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,13 +64,43 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.general_hamming(10, alpha=0.5, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): + pytorch_code = 
textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): pytorch_code = textwrap.dedent( """ import torch @@ -77,3 +108,83 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.8, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.8, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index d3ff10417..95aa88c09 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.hamming(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64) + a=10 + result = torch.signal.windows.hamming(a, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.hamming(M=10, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,13 +64,33 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = 
torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): pytorch_code = textwrap.dedent( """ import torch @@ -77,3 +98,33 @@ def test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, sym=False, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index e803b5bc6..2b070a153 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.hann(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False, check_dtype=False) + obj.run(pytorch_code, ["result"], check_value=False) def test_case_2(): @@ -43,7 +43,8 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64) + a=10 + result = torch.signal.windows.hann(a, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -53,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.hann(M=10, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -63,13 +64,33 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) def test_case_6(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, dtype=torch.float64, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_7(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_8(): pytorch_code = textwrap.dedent( """ import torch @@ -77,3 +98,33 @@ def 
test_case_6(): """ ) obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_9(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, sym=False, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_10(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_11(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) From c87d488b1a46b9a27e40ded29616a5b9df858ca0 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 9 Oct 2024 15:07:41 +0800 Subject: [PATCH 42/53] fix --- tests/test_signal_windows_blackman.py | 18 +++++++++++---- tests/test_signal_windows_cosine.py | 18 +++++++++++---- tests/test_signal_windows_exponential.py | 14 ++++++++++-- tests/test_signal_windows_gaussian.py | 18 +++++++++++---- tests/test_signal_windows_general_cosine.py | 18 +++++++++++---- tests/test_signal_windows_general_hamming.py | 24 ++++++++++++++------ tests/test_signal_windows_hamming.py | 16 ++++++++++--- tests/test_signal_windows_hann.py | 24 ++++++++++++++------ 8 files changed, 115 insertions(+), 35 deletions(-) diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index b3f825d95..592ff0001 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -105,7 +105,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64) + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -115,7 +115,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -125,7 +125,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -135,7 +135,17 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(dtype=torch.float64, M=5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git 
a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py index 07040ccd2..8b1ee0d0a 100644 --- a/tests/test_signal_windows_cosine.py +++ b/tests/test_signal_windows_cosine.py @@ -105,7 +105,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64) + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -115,7 +115,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -125,7 +125,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -135,7 +135,17 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(dtype=torch.float64, M=5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index db6a80c4a..7b821b0f9 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -94,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5) + result = torch.signal.windows.exponential(M=10, tau=0.5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -184,7 +184,17 @@ def test_case_17(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.exponential(M=10, sym=False, tau=0.5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_18(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.exponential(requires_grad=True, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.5) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index e5799d075..78383cdac 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -144,7 +144,7 @@ def test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.6, sym=False) + result = torch.signal.windows.gaussian(M=10, 
std=0.6, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -154,7 +154,7 @@ def test_case_14(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.6, sym=False, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -164,7 +164,7 @@ def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.6, sym=False, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -174,7 +174,17 @@ def test_case_16(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.6, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.gaussian(M=10, std=0.7, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(requires_grad=True, std=0.5, sym=False, M=10, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index d3d2c78b6..b1caa3ccb 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) + result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, requires_grad=True) + result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -134,7 +134,17 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(sym=False, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff 
--git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index f0c1244e2..4d158de3d 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.8, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.8, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -134,7 +134,7 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -154,7 +154,7 @@ def test_case_14(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False) + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -164,7 +164,7 @@ def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -174,7 +174,7 @@ def test_case_16(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -184,7 +184,17 @@ def test_case_17(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.8, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_18(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_hamming(requires_grad=True, alpha=0.2, sym=False, M=5, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index 95aa88c09..448f25d56 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, sym=False, 
requires_grad=True) + result = torch.signal.windows.hamming(M=10, sym=False, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hamming(M=10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,17 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.hamming(M=10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(dtype=torch.float64, sym=False, device=torch.device('cpu'), layout=torch.strided, M=10, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index 2b070a153..d53028c45 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -64,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64) + result = torch.signal.windows.hann(M=10, sym=False, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -74,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.hann(M=10, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -84,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hann(M=10, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -94,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.hann(M=10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, sym=False, requires_grad=True) + result = torch.signal.windows.hann(M=10, sym=False, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hann(M=10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,17 @@ def 
test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.hann(M=10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_12(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(dtype=torch.float64, sym=False, M=10, device=torch.device('cpu'), layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) From c7f6aa80a9efd3461dafc786a2aa72c16571318a Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 9 Oct 2024 17:06:30 +0800 Subject: [PATCH 43/53] fix --- paconvert/api_matcher.py | 22 +++++++-------------- tests/test_signal_windows_general_cosine.py | 2 +- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 6c4c68e07..2d19ee0f6 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -523,19 +523,12 @@ class SignalWindowsWatcher(BaseMatcher): def generate_code(self, kwargs): new_kwargs = {} if "exponential" in self.torch_api: - if "sym" in kwargs: - if kwargs["sym"] != "(False)": - if "tau" in kwargs: - tau_value = float(str(kwargs.pop("tau")).strip("()")) - new_kwargs["window"] = ("exponential", tau_value) - else: - new_kwargs["window"] = ("exponential", 1.0) + if "sym" in kwargs and kwargs["sym"] == "(False)": + if "tau" in kwargs: + tau_value = float(str(kwargs.pop("tau")).strip("()")) + new_kwargs["window"] = ("exponential", None, tau_value) else: - if "tau" in kwargs: - tau_value = float(str(kwargs.pop("tau")).strip("()")) - new_kwargs["window"] = ("exponential", None, tau_value) - else: - new_kwargs["window"] = ("exponential", None, 1.0) + new_kwargs["window"] = ("exponential", None, 1.0) else: if "tau" in kwargs: tau_value = float(str(kwargs.pop("tau")).strip("()")) @@ -555,9 +548,8 @@ def generate_code(self, kwargs): else: new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: - a_value = [v for v in kwargs.values()][1] - new_kwargs["window"] = ("general_cosine", a_value) - new_kwargs.update(kwargs) + new_kwargs["window"] = ("general_cosine", kwargs.pop("a")) + new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index b1caa3ccb..0cd01c760 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -134,7 +134,7 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.general_cosine(M=10, sym=False, a=[0.5, 1 - 0.5], layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) From 9e740e42ba9a01ea2b01185e08fe004a9fd7feae Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 10 Oct 2024 08:51:26 +0800 Subject: [PATCH 44/53] fix --- paconvert/api_matcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 2d19ee0f6..3c5c73dff 100644 --- a/paconvert/api_matcher.py +++ 
b/paconvert/api_matcher.py @@ -549,7 +549,7 @@ def generate_code(self, kwargs): new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: new_kwargs["window"] = ("general_cosine", kwargs.pop("a")) - new_kwargs.update(kwargs) + new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) From cf14997e1f4bcb67ff10bd5f643f082a604ff51a Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 10 Oct 2024 14:30:33 +0800 Subject: [PATCH 45/53] fix --- tests/test_signal_windows_blackman.py | 57 ++++++++++++----- tests/test_signal_windows_cosine.py | 57 ++++++++++++----- tests/test_signal_windows_exponential.py | 36 +++++------ tests/test_signal_windows_gaussian.py | 44 ++++++++----- tests/test_signal_windows_general_cosine.py | 32 ++++++++-- tests/test_signal_windows_general_hamming.py | 52 ++++----------- tests/test_signal_windows_hamming.py | 66 ++++++++++++++++---- tests/test_signal_windows_hann.py | 66 ++++++++++++++++---- 8 files changed, 274 insertions(+), 136 deletions(-) diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index 592ff0001..074101521 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -33,7 +33,8 @@ def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, dtype=torch.float64) + a = 5 + result = torch.signal.windows.blackman(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,8 +44,7 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - a = 5 - result = torch.signal.windows.blackman(a) + result = torch.signal.windows.blackman(M=5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -54,8 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - a = 5 - result = torch.signal.windows.blackman(M = a, dtype=torch.float32) + result = torch.signal.windows.blackman(M=5, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -65,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.blackman(M=5, sym=True, dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -75,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.blackman(M=5, sym=True, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -85,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -95,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(5, sym=False) + result = torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -105,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64) + result = 
torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -115,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -125,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.blackman(M=5, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -135,7 +134,7 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -145,7 +144,37 @@ def test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.blackman(dtype=torch.float64, M=5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True, sym=False) + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.blackman(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py index 8b1ee0d0a..67ebd632e 100644 --- a/tests/test_signal_windows_cosine.py +++ b/tests/test_signal_windows_cosine.py @@ -33,7 +33,8 @@ def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, dtype=torch.float64) + a = 5 + result = torch.signal.windows.cosine(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,8 +44,7 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - a = 5 - result = torch.signal.windows.cosine(a) + result = torch.signal.windows.cosine(M=5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -54,8 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - a = 5 - result = torch.signal.windows.cosine(M = a, dtype=torch.float32) + result = torch.signal.windows.cosine(M=5, sym=True) """ ) 
obj.run(pytorch_code, ["result"], check_value=False) @@ -65,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.cosine(M=5, sym=True, dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -75,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.cosine(M=5, sym=True, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -85,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -95,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(5, sym=False) + result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -105,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64) + result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -115,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -125,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.cosine(M=5, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -135,7 +134,7 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -145,7 +144,37 @@ def test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.cosine(dtype=torch.float64, M=5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True, sym=False) + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], 
check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.cosine(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 7b821b0f9..c9e6364a8 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -33,7 +33,8 @@ def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, dtype=torch.float64) + a = 10 + result = torch.signal.windows.exponential(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,8 +44,7 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - a = 10 - result = torch.signal.windows.exponential(a, dtype=torch.float64) + result = torch.signal.windows.exponential(M=10) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -54,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(M=10, dtype=torch.float64) + result = torch.signal.windows.exponential(M=10, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -64,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -74,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -84,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -94,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(M=10, tau=0.5) + result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, requires_grad=True) + result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, 
layout=torch.strided, requires_grad=True) + result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.exponential(M=10, tau=0.5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -134,7 +134,7 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, sym=False) + result = torch.signal.windows.exponential(10, tau=0.5, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -144,7 +144,7 @@ def test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, sym=False, dtype=torch.float64) + result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -154,7 +154,7 @@ def test_case_14(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, sym=False) + result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -164,7 +164,7 @@ def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, sym=False, requires_grad=True) + result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -174,7 +174,7 @@ def test_case_16(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(10, tau=0.5, sym=False, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -184,7 +184,7 @@ def test_case_17(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(M=10, sym=False, tau=0.5, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.exponential(requires_grad=False, sym=True, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.6) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -194,7 +194,7 @@ def test_case_18(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(requires_grad=True, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.5) + result = torch.signal.windows.exponential(requires_grad=True, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.3) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index 78383cdac..3b74cc206 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -33,7 +33,8 @@ def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, 
dtype=torch.float64) + a=10 + result = torch.signal.windows.gaussian(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,8 +44,7 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - a=10 - result = torch.signal.windows.gaussian(a, dtype=torch.float64) + result = torch.signal.windows.gaussian(M=10) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -54,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, dtype=torch.float64) + result = torch.signal.windows.gaussian(M=10, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -64,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64) + result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -74,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -84,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -94,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, std=0.9, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, sym=False) + result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, sym=False, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, sym=False, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, sym=False, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -134,7 +134,7 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(10, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.gaussian(M=10, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -144,7 +144,7 @@ def 
test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False) + result = torch.signal.windows.gaussian(M=10, std=0.6, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -154,7 +154,7 @@ def test_case_14(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -164,7 +164,7 @@ def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -174,7 +174,7 @@ def test_case_16(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, std=0.7, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.gaussian(M=10, std=0.6, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -184,7 +184,17 @@ def test_case_17(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(requires_grad=True, std=0.5, sym=False, M=10, layout=torch.strided, device=torch.device('cpu')) + result = torch.signal.windows.gaussian(requires_grad=True, std=0.8, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_18(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=True, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 0cd01c760..243b80718 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -54,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(M=10, a=[0.46, 0.23, 0.31], dtype=torch.float64) + result = torch.signal.windows.general_cosine(M=10, a=[0.46, 0.23, 0.31], dtype=torch.float64, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -64,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -74,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = 
torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -84,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64) + result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -144,7 +144,27 @@ def test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(sym=False, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) + result = torch.signal.windows.general_cosine(M=10, sym=False, a=[0.5, 1 - 0.5], layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(sym=False, requires_grad=False, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.general_cosine(sym=True, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index 4d158de3d..8fdc192a7 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -64,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, alpha=0.5, dtype=torch.float64) + result = torch.signal.windows.general_hamming(M=10, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -74,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64) + result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -84,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -94,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = 
torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -134,7 +134,7 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.general_hamming(M=10, sym=False, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -144,7 +144,7 @@ def test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(10, sym=False) + result = torch.signal.windows.general_hamming(M=10, sym=False, alpha=0.8, layout=torch.strided, requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -154,7 +154,7 @@ def test_case_14(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False) + result = torch.signal.windows.general_hamming(M=10, sym=False, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -164,37 +164,7 @@ def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False, requires_grad=True) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_16(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False, layout=torch.strided, requires_grad=True) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_17(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.general_hamming(M=10, alpha=0.8, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) - """ - ) - obj.run(pytorch_code, ["result"], check_value=False) - - -def test_case_18(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.general_hamming(requires_grad=True, 
alpha=0.2, sym=False, M=5, layout=torch.strided, device=torch.device('cpu')) + result = torch.signal.windows.general_hamming(requires_grad=False, alpha=0.2, sym=False, M=5, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index 448f25d56..08d467c74 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -23,7 +23,7 @@ def test_case_1(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10) + result = torch.signal.windows.hamming(5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -33,7 +33,8 @@ def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, dtype=torch.float64) + a = 5 + result = torch.signal.windows.hamming(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,8 +44,7 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - a=10 - result = torch.signal.windows.hamming(a, dtype=torch.float64) + result = torch.signal.windows.hamming(M=5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -54,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(M=10, dtype=torch.float64) + result = torch.signal.windows.hamming(M=5, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -64,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, sym=False, dtype=torch.float64) + result = torch.signal.windows.hamming(M=5, sym=True, dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -74,7 +74,7 @@ def test_case_6(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.hamming(M=5, sym=True, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -84,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -94,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(M=10, sym=False, requires_grad=True) + result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(M=10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided, 
device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(M=10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.hamming(M=5, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -134,7 +134,47 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hamming(dtype=torch.float64, sym=False, device=torch.device('cpu'), layout=torch.strided, M=10, requires_grad=True) + result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float64, layout=torch.strided) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hamming(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index d53028c45..f61dfe675 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -23,7 +23,7 @@ def test_case_1(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10) + result = torch.signal.windows.hann(5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -33,7 +33,8 @@ def test_case_2(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(10, dtype=torch.float64) + a = 5 + result = torch.signal.windows.hann(a) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -43,8 +44,7 @@ def test_case_3(): pytorch_code = textwrap.dedent( """ import torch - a=10 - result = torch.signal.windows.hann(a, dtype=torch.float64) + result = torch.signal.windows.hann(M=5) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -54,7 +54,7 @@ def test_case_4(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, dtype=torch.float64) + result = torch.signal.windows.hann(M=5, sym=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -64,7 +64,7 @@ def test_case_5(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, sym=False, dtype=torch.float64) + result = torch.signal.windows.hann(M=5, sym=True, dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -74,7 +74,7 @@ def test_case_6(): pytorch_code = 
textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, dtype=torch.float64, requires_grad=True) + result = torch.signal.windows.hann(M=5, sym=True, dtype=torch.float64) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -84,7 +84,7 @@ def test_case_7(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -94,7 +94,7 @@ def test_case_8(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -104,7 +104,7 @@ def test_case_9(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, sym=False, requires_grad=True) + result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -114,7 +114,7 @@ def test_case_10(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -124,7 +124,7 @@ def test_case_11(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(M=10, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.hann(M=5, sym=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) @@ -134,7 +134,47 @@ def test_case_12(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.hann(dtype=torch.float64, sym=False, M=10, device=torch.device('cpu'), layout=torch.strided, requires_grad=True) + result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float64, layout=torch.strided) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_13(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_14(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_15(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"], check_value=False) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + result = torch.signal.windows.hann(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, 
sym=False, requires_grad=False) """ ) obj.run(pytorch_code, ["result"], check_value=False) From 13090a918e54d94a332606aeea69d79b08e6e66d Mon Sep 17 00:00:00 2001 From: enkilee Date: Sat, 12 Oct 2024 16:49:00 +0800 Subject: [PATCH 46/53] fix --- paconvert/api_mapping.json | 8 ++--- paconvert/api_matcher.py | 13 ++++--- tests/test_signal_windows_blackman.py | 32 ++++++++--------- tests/test_signal_windows_cosine.py | 32 ++++++++--------- tests/test_signal_windows_exponential.py | 36 ++++++++++---------- tests/test_signal_windows_gaussian.py | 36 ++++++++++---------- tests/test_signal_windows_general_cosine.py | 30 ++++++++-------- tests/test_signal_windows_general_hamming.py | 32 ++++++++--------- tests/test_signal_windows_hamming.py | 32 ++++++++--------- tests/test_signal_windows_hann.py | 32 ++++++++--------- 10 files changed, 144 insertions(+), 139 deletions(-) diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json index 1e654e781..cb109f509 100644 --- a/paconvert/api_mapping.json +++ b/paconvert/api_mapping.json @@ -14667,7 +14667,7 @@ } }, "torch.signal.windows.blackman": { - "Matcher": "GenericMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14690,7 +14690,7 @@ } }, "torch.signal.windows.cosine": { - "Matcher": "GenericMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14810,7 +14810,7 @@ } }, "torch.signal.windows.hamming": { - "Matcher": "GenericMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ @@ -14833,7 +14833,7 @@ } }, "torch.signal.windows.hann": { - "Matcher": "GenericMatcher", + "Matcher": "SignalWindowsWatcher", "paddle_api": "paddle.audio.functional.get_window", "min_input_args": 1, "args_list": [ diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 3c5c73dff..6b1dee202 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -522,19 +522,24 @@ def generate_code(self, kwargs): class SignalWindowsWatcher(BaseMatcher): def generate_code(self, kwargs): new_kwargs = {} + if "sym" not in kwargs or kwargs["sym"] == "(True)": + kwargs["fftbins"] = "(False)" + elif kwargs["sym"] == "(False)": + kwargs["fftbins"] = "(True)" if "exponential" in self.torch_api: - if "sym" in kwargs and kwargs["sym"] == "(False)": + if "sym" not in kwargs or kwargs["sym"] == "(True)": if "tau" in kwargs: tau_value = float(str(kwargs.pop("tau")).strip("()")) new_kwargs["window"] = ("exponential", None, tau_value) else: new_kwargs["window"] = ("exponential", None, 1.0) else: + c_value = float(str(kwargs["M"]).strip("()")) / 2 if "tau" in kwargs: tau_value = float(str(kwargs.pop("tau")).strip("()")) - new_kwargs["window"] = ("exponential", tau_value) + new_kwargs["window"] = ("exponential", c_value, tau_value) else: - new_kwargs["window"] = ("exponential", 1.0) + new_kwargs["window"] = ("exponential", c_value, 1.0) if "gaussian" in self.torch_api: if "std" in kwargs: std_value = float(str(kwargs.pop("std")).strip("()")) @@ -548,7 +553,7 @@ def generate_code(self, kwargs): else: new_kwargs["window"] = ("general_hamming", 0.54) if "general_cosine" in self.torch_api: - new_kwargs["window"] = ("general_cosine", kwargs.pop("a")) + new_kwargs["window"] = ("general_cosine", eval(kwargs["a"])) new_kwargs.update(kwargs) return GenericMatcher.generate_code(self, new_kwargs) diff --git 
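Note on the matcher hunk above: torch.signal.windows.* takes a `sym` flag, while paddle.audio.functional.get_window takes a scipy-style `fftbins` flag (periodic window), so the matcher maps `sym=True` (or omitted) to `fftbins=False` and `sym=False` to `fftbins=True`, and passes parametrised windows as tuples such as ("gaussian", std), ("exponential", center, tau), ("general_hamming", alpha) or ("general_cosine", a). As a rough hand-written sketch of the kind of rewrite this is aiming for — illustration only, not the literal converter output:

    # PyTorch input
    w = torch.signal.windows.gaussian(10, std=0.9, sym=False, dtype=torch.float64)
    # Approximate Paddle equivalent produced along the lines of the matcher above
    w = paddle.audio.functional.get_window(("gaussian", 0.9), 10, fftbins=True, dtype="float64")
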
a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py index 074101521..3cb90cc39 100644 --- a/tests/test_signal_windows_blackman.py +++ b/tests/test_signal_windows_blackman.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.blackman(5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_2(): @@ -37,7 +37,7 @@ def test_case_2(): result = torch.signal.windows.blackman(a) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.blackman(M=5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.blackman(M=5, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.blackman(M=5, sym=True, dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.blackman(M=5, sym=True, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.blackman(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.blackman(M=5, sym=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_12(): @@ -137,7 +137,7 @@ def test_case_12(): result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_14(): @@ -157,7 +157,7 @@ def test_case_14(): result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, 
device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): @@ -167,7 +167,7 @@ def test_case_15(): result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_16(): @@ -177,4 +177,4 @@ def test_case_16(): result = torch.signal.windows.blackman(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"], atol=1e-05) diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py index 67ebd632e..5beea6a0c 100644 --- a/tests/test_signal_windows_cosine.py +++ b/tests/test_signal_windows_cosine.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.cosine(5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_2(): @@ -37,7 +37,7 @@ def test_case_2(): result = torch.signal.windows.cosine(a) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.cosine(M=5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.cosine(M=5, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.cosine(M=5, sym=True, dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.cosine(M=5, sym=True, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.cosine(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.cosine(M=5, sym=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_12(): @@ -137,7 +137,7 @@ 
def test_case_12(): result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_14(): @@ -157,7 +157,7 @@ def test_case_14(): result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): @@ -167,7 +167,7 @@ def test_case_15(): result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_16(): @@ -177,4 +177,4 @@ def test_case_16(): result = torch.signal.windows.cosine(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index c9e6364a8..6184aa5a1 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.exponential(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_2(): @@ -37,7 +37,7 @@ def test_case_2(): result = torch.signal.windows.exponential(a) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.exponential(M=10) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.exponential(M=10, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, 
layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.exponential(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.exponential(M=10, tau=0.5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_12(): @@ -137,7 +137,7 @@ def test_case_12(): result = torch.signal.windows.exponential(10, tau=0.5, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_14(): @@ -157,7 +157,7 @@ def test_case_14(): result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): @@ -167,7 +167,7 @@ def test_case_15(): result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_16(): @@ -177,7 +177,7 @@ def test_case_16(): result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_17(): @@ -187,7 +187,7 @@ def test_case_17(): result = torch.signal.windows.exponential(requires_grad=False, sym=True, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.6) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_18(): @@ -197,4 +197,4 @@ def test_case_18(): result = torch.signal.windows.exponential(requires_grad=True, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.3) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index 3b74cc206..33d5efd46 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.gaussian(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_2(): @@ -37,7 +37,7 @@ def test_case_2(): result = torch.signal.windows.gaussian(a) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.gaussian(M=10) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.gaussian(M=10, sym=True) 
""" ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = torch.signal.windows.gaussian(M=10, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.gaussian(M=10, sym=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.gaussian(M=10, sym=False, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_12(): @@ -137,7 +137,7 @@ def test_case_12(): result = torch.signal.windows.gaussian(M=10, sym=False, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.gaussian(M=10, std=0.6, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_14(): @@ -157,7 +157,7 @@ def test_case_14(): result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): @@ -167,7 +167,7 @@ def test_case_15(): result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_16(): @@ -177,7 +177,7 @@ def test_case_16(): result = torch.signal.windows.gaussian(M=10, std=0.6, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_17(): @@ -187,7 +187,7 @@ def test_case_17(): result = 
torch.signal.windows.gaussian(requires_grad=True, std=0.8, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_18(): @@ -197,4 +197,4 @@ def test_case_18(): result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=True, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 243b80718..8672e1562 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31]) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.general_cosine(m, a=[0.46, 0.23, 0.31], dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.general_cosine(M=10, a=[0.46, 0.23, 0.31], dtype=torch.float64, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.general_cosine(M=10, a=[0.5, 1 - 0.5], sym=False, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.general_cosine(M=10, 
a=[0.5, 1 - 0.5], sym=False, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_12(): @@ -137,7 +137,7 @@ def test_case_12(): result = torch.signal.windows.general_cosine(M=10, sym=False, a=[0.5, 1 - 0.5], layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.general_cosine(M=10, sym=False, a=[0.5, 1 - 0.5], layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_14(): @@ -157,7 +157,7 @@ def test_case_14(): result = torch.signal.windows.general_cosine(sym=False, requires_grad=False, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): @@ -167,4 +167,4 @@ def test_case_15(): result = torch.signal.windows.general_cosine(sym=True, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index 8fdc192a7..8d5485047 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.general_hamming(10) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_2(): @@ -36,7 +36,7 @@ def test_case_2(): result = torch.signal.windows.general_hamming(10, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.general_hamming(a) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.general_hamming(M=10) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.general_hamming(M=10, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = 
torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.general_hamming(M=10, sym=True, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.general_hamming(M=10, sym=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_12(): @@ -137,7 +137,7 @@ def test_case_12(): result = torch.signal.windows.general_hamming(M=10, sym=False, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.general_hamming(M=10, sym=False, alpha=0.8, layout=torch.strided, requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_14(): @@ -157,14 +157,14 @@ def test_case_14(): result = torch.signal.windows.general_hamming(M=10, sym=False, alpha=0.8, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_hamming(requires_grad=False, alpha=0.2, sym=False, M=5, layout=torch.strided, device=torch.device('cpu')) + result = torch.signal.windows.general_hamming(requires_grad=False, alpha=0.4, sym=False, M=10, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index 08d467c74..e0d5a48ec 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.hamming(5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_2(): @@ -37,7 +37,7 @@ def test_case_2(): result = torch.signal.windows.hamming(a) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.hamming(M=5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.hamming(M=5, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.hamming(M=5, sym=True, dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.hamming(M=5, sym=True, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], 
check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.hamming(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.hamming(M=5, sym=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_12(): @@ -137,7 +137,7 @@ def test_case_12(): result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_14(): @@ -157,7 +157,7 @@ def test_case_14(): result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): @@ -167,7 +167,7 @@ def test_case_15(): result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_16(): @@ -177,4 +177,4 @@ def test_case_16(): result = torch.signal.windows.hamming(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index f61dfe675..ed475b3ac 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -26,7 +26,7 @@ def test_case_1(): result = torch.signal.windows.hann(5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_2(): @@ -37,7 +37,7 @@ def test_case_2(): result = torch.signal.windows.hann(a) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_3(): @@ -47,7 +47,7 @@ def test_case_3(): result = torch.signal.windows.hann(M=5) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + 
obj.run(pytorch_code, ["result"]) def test_case_4(): @@ -57,7 +57,7 @@ def test_case_4(): result = torch.signal.windows.hann(M=5, sym=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_5(): @@ -67,7 +67,7 @@ def test_case_5(): result = torch.signal.windows.hann(M=5, sym=True, dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_6(): @@ -77,7 +77,7 @@ def test_case_6(): result = torch.signal.windows.hann(M=5, sym=True, dtype=torch.float64) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_7(): @@ -87,7 +87,7 @@ def test_case_7(): result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_8(): @@ -97,7 +97,7 @@ def test_case_8(): result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_9(): @@ -107,7 +107,7 @@ def test_case_9(): result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_10(): @@ -117,7 +117,7 @@ def test_case_10(): result = torch.signal.windows.hann(5, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_11(): @@ -127,7 +127,7 @@ def test_case_11(): result = torch.signal.windows.hann(M=5, sym=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_12(): @@ -137,7 +137,7 @@ def test_case_12(): result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float64, layout=torch.strided) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_13(): @@ -147,7 +147,7 @@ def test_case_13(): result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu')) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_14(): @@ -157,7 +157,7 @@ def test_case_14(): result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_15(): @@ -167,7 +167,7 @@ def test_case_15(): result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) def test_case_16(): @@ -177,4 +177,4 @@ def test_case_16(): result = torch.signal.windows.hann(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) """ ) - obj.run(pytorch_code, ["result"], check_value=False) + obj.run(pytorch_code, ["result"]) From 4963e99aafbdc58fa863a3e5c156b9bad819bacf Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 14 Oct 2024 11:27:12 +0800 
Subject: [PATCH 47/53] fix

---
 paconvert/api_mapping.json                   |  1 -
 paconvert/api_matcher.py                     | 11 +++++-----
 tests/test_signal_windows_blackman.py        | 23 ++++++++++++++++++++
 tests/test_signal_windows_cosine.py          | 23 ++++++++++++++++++++
 tests/test_signal_windows_exponential.py     | 23 ++++++++++++++++++++
 tests/test_signal_windows_gaussian.py        | 23 ++++++++++++++++++++
 tests/test_signal_windows_general_cosine.py  | 23 ++++++++++++++++++++
 tests/test_signal_windows_general_hamming.py | 23 ++++++++++++++++++++
 tests/test_signal_windows_hamming.py         | 23 ++++++++++++++++++++
 tests/test_signal_windows_hann.py            | 23 ++++++++++++++++++++
 10 files changed, 190 insertions(+), 6 deletions(-)

diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json
index cb109f509..ee99f71d2 100644
--- a/paconvert/api_mapping.json
+++ b/paconvert/api_mapping.json
@@ -14778,7 +14778,6 @@
         ],
         "kwargs_change": {
             "M": "win_length",
-            "a": "",
             "sym": "fftbins",
             "dtype": "dtype"
         },
diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py
index 6b1dee202..978c0cc6d 100644
--- a/paconvert/api_matcher.py
+++ b/paconvert/api_matcher.py
@@ -522,10 +522,10 @@ def generate_code(self, kwargs):
 class SignalWindowsWatcher(BaseMatcher):
     def generate_code(self, kwargs):
         new_kwargs = {}
-        if "sym" not in kwargs or kwargs["sym"] == "(True)":
+        if "sym" in kwargs:
+            kwargs["fftbins"] = f"1 - {kwargs.pop('sym')}"
+        else:
             kwargs["fftbins"] = "(False)"
-        elif kwargs["sym"] == "(False)":
-            kwargs["fftbins"] = "(True)"
         if "exponential" in self.torch_api:
             if "sym" not in kwargs or kwargs["sym"] == "(True)":
                 if "tau" in kwargs:
@@ -534,7 +534,7 @@ ...
                 else:
                     new_kwargs["window"] = ("exponential", None, 1.0)
             else:
-                c_value = float(str(kwargs["M"]).strip("()")) / 2
+                c_value = float(str(kwargs.pop("tau")).strip("()")) / 2
                 if "tau" in kwargs:
                     tau_value = float(str(kwargs.pop("tau")).strip("()"))
                     new_kwargs["window"] = ("exponential", c_value, tau_value)
@@ -553,7 +553,8 @@
             else:
                 new_kwargs["window"] = ("general_hamming", 0.54)
         if "general_cosine" in self.torch_api:
-            new_kwargs["window"] = ("general_cosine", eval(kwargs["a"]))
+            a_value = str(kwargs.pop("a")).strip("()")
+            new_kwargs["window"] = ("general_cosine", eval(a_value))
         new_kwargs.update(kwargs)
         return GenericMatcher.generate_code(self, new_kwargs)
diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py
index 3cb90cc39..e46ff430a 100644
--- a/tests/test_signal_windows_blackman.py
+++ b/tests/test_signal_windows_blackman.py
@@ -178,3 +178,26 @@ def test_case_16():
         """
     )
     obj.run(pytorch_code, ["result"], atol=1e-05)
+
+
+def test_case_17():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        a=False
+        result = torch.signal.windows.blackman(M=5, sym=a, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), requires_grad=False)
+        """
+    )
+    obj.run(pytorch_code, ["result"], atol=1e-05)
+
+
+def test_case_18():
+    pytorch_code = textwrap.dedent(
+        """
+        import torch
+        a=False
+        m=5
+        result = torch.signal.windows.blackman(M=m, sym=a, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), requires_grad=False)
+        """
+    )
+    obj.run(pytorch_code, ["result"], atol=1e-05)
diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py
index 5beea6a0c..d87597d04 100644
--- a/tests/test_signal_windows_cosine.py
+++ b/tests/test_signal_windows_cosine.py
@@ -178,3 +178,26 @@ def test_case_16():
         """
     )
     obj.run(pytorch_code, ["result"])
+
+
+def
test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + result = torch.signal.windows.cosine(M=5, sym=a, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"], atol=1e-05) + + +def test_case_18(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + m=5 + result = torch.signal.windows.cosine(M=m, sym=a, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"], atol=1e-05) diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py index 6184aa5a1..577bab0f0 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -198,3 +198,26 @@ def test_case_18(): """ ) obj.run(pytorch_code, ["result"]) + + +def test_case_19(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + result = torch.signal.windows.exponential(requires_grad=True, sym=a, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.3) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_20(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + m=10 + result = torch.signal.windows.exponential(requires_grad=True, sym=a, M=m, layout=torch.strided, device=torch.device('cpu'), tau=0.3) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index 33d5efd46..dc629c6d1 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -198,3 +198,26 @@ def test_case_18(): """ ) obj.run(pytorch_code, ["result"]) + + +def test_case_19(): + pytorch_code = textwrap.dedent( + """ + import torch + a=True + result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=a, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_20(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + m=10 + result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=a, M=m, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 8672e1562..4bca2233c 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -168,3 +168,26 @@ def test_case_15(): """ ) obj.run(pytorch_code, ["result"]) + + +def test_case_16(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + result = torch.signal.windows.general_cosine(sym=a, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + s=False + m=10 + result = torch.signal.windows.general_cosine(sym=s, requires_grad=True, a=[0.5, 1 - 0.5], M=m, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index 8d5485047..b5c78e344 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -168,3 +168,26 @@ def test_case_15(): """ ) obj.run(pytorch_code, ["result"]) + + +def test_case_16(): 
+ pytorch_code = textwrap.dedent( + """ + import torch + a=False + result = torch.signal.windows.general_hamming(requires_grad=False, alpha=0.4, sym=a, M=10, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + m=10 + result = torch.signal.windows.general_hamming(requires_grad=False, alpha=0.4, sym=a, M=m, layout=torch.strided, device=torch.device('cpu')) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index e0d5a48ec..6d9e315d4 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -178,3 +178,26 @@ def test_case_16(): """ ) obj.run(pytorch_code, ["result"]) + + +def test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + result = torch.signal.windows.hamming(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=a, requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_18(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + m=5 + result = torch.signal.windows.hamming(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=m, sym=a, requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index ed475b3ac..3558de96d 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -178,3 +178,26 @@ def test_case_16(): """ ) obj.run(pytorch_code, ["result"]) + + +def test_case_17(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + result = torch.signal.windows.hann(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=a, requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"]) + + +def test_case_18(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + m=5 + result = torch.signal.windows.hann(dtype=torch.float32, M=m, sym=a, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) + """ + ) + obj.run(pytorch_code, ["result"]) From 4bbab9485fc06b44c529d03c6ecd4070342277b0 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 14 Oct 2024 11:44:04 +0800 Subject: [PATCH 48/53] fix --- tests/test_signal_windows_gaussian.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index dc629c6d1..c09e935e4 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -187,7 +187,7 @@ def test_case_17(): result = torch.signal.windows.gaussian(requires_grad=True, std=0.8, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], atol=1e-05) def test_case_18(): @@ -220,4 +220,4 @@ def test_case_20(): result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=a, M=m, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) """ ) - obj.run(pytorch_code, ["result"]) + obj.run(pytorch_code, ["result"], atol=1e-05) From b279579bf8620596f9922a0176c323f5d25af135 Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 15 Oct 2024 15:09:05 +0800 Subject: [PATCH 49/53] fix --- paconvert/api_mapping.json | 16 +++++++++---- paconvert/api_matcher.py | 30 +++++++++--------------- 
 tests/test_signal_windows_exponential.py |  2 +-
 3 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/paconvert/api_mapping.json b/paconvert/api_mapping.json
index ee99f71d2..a2c585fe0 100644
--- a/paconvert/api_mapping.json
+++ b/paconvert/api_mapping.json
@@ -14686,6 +14686,7 @@
         },
         "paddle_default_kwargs": {
             "dtype": "'float32'",
+            "fftbins": "False",
             "window": "'blackman'"
         }
     },
@@ -14709,6 +14710,7 @@
         },
         "paddle_default_kwargs": {
             "dtype": "'float32'",
+            "fftbins": "False",
             "window": "'cosine'"
         }
     },
@@ -14736,7 +14738,8 @@
             "dtype": "dtype"
         },
         "paddle_default_kwargs": {
-            "dtype": "'float32'"
+            "dtype": "'float32'",
+            "fftbins": "False"
         }
     },
     "torch.signal.windows.gaussian": {
@@ -14759,7 +14762,8 @@
             "dtype": "dtype"
         },
         "paddle_default_kwargs": {
-            "dtype": "'float32'"
+            "dtype": "'float32'",
+            "fftbins": "False"
         }
     },
     "torch.signal.windows.general_cosine": {
@@ -14782,7 +14786,8 @@
             "dtype": "dtype"
         },
         "paddle_default_kwargs": {
-            "dtype": "'float32'"
+            "dtype": "'float32'",
+            "fftbins": "False"
         }
     },
     "torch.signal.windows.general_hamming": {
@@ -14805,7 +14810,8 @@
             "dtype": "dtype"
         },
         "paddle_default_kwargs": {
-            "dtype": "'float32'"
+            "dtype": "'float32'",
+            "fftbins": "False"
         }
     },
     "torch.signal.windows.hamming": {
@@ -14828,6 +14834,7 @@
         },
         "paddle_default_kwargs": {
             "dtype": "'float32'",
+            "fftbins": "False",
             "window": "'hamming'"
         }
     },
@@ -14851,6 +14858,7 @@
         },
         "paddle_default_kwargs": {
             "dtype": "'float32'",
+            "fftbins": "False",
             "window": "'hann'"
         }
     },
diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py
index 978c0cc6d..969fbeb54 100644
--- a/paconvert/api_matcher.py
+++ b/paconvert/api_matcher.py
@@ -523,38 +523,30 @@ class SignalWindowsWatcher(BaseMatcher):
     def generate_code(self, kwargs):
         new_kwargs = {}
         if "sym" in kwargs:
-            kwargs["fftbins"] = f"1 - {kwargs.pop('sym')}"
-        else:
-            kwargs["fftbins"] = "(False)"
+            kwargs["fftbins"] = "not " + kwargs["sym"]
         if "exponential" in self.torch_api:
-            if "sym" not in kwargs or kwargs["sym"] == "(True)":
-                if "tau" in kwargs:
-                    tau_value = float(str(kwargs.pop("tau")).strip("()"))
-                    new_kwargs["window"] = ("exponential", None, tau_value)
-                else:
-                    new_kwargs["window"] = ("exponential", None, 1.0)
+            if "tau" in kwargs:
+                new_kwargs["window"] = (
+                    "exponential",
+                    None,
+                    kwargs.pop("tau").strip("()"),
+                )
             else:
-                c_value = float(str(kwargs.pop("tau")).strip("()")) / 2
-                if "tau" in kwargs:
-                    tau_value = float(str(kwargs.pop("tau")).strip("()"))
-                    new_kwargs["window"] = ("exponential", c_value, tau_value)
-                else:
-                    new_kwargs["window"] = ("exponential", c_value, 1.0)
+                new_kwargs["window"] = ("exponential", None, 1.0)
         if "gaussian" in self.torch_api:
             if "std" in kwargs:
-                std_value = float(str(kwargs.pop("std")).strip("()"))
+                std_value = float(kwargs.pop("std").strip("()"))
                 new_kwargs["window"] = ("gaussian", std_value)
             else:
                 new_kwargs["window"] = ("gaussian", 1.0)
         if "general_hamming" in self.torch_api:
             if "alpha" in kwargs:
-                alpha_value = float(str(kwargs.pop("alpha")).strip("()"))
+                alpha_value = float(kwargs.pop("alpha").strip("()"))
                 new_kwargs["window"] = ("general_hamming", alpha_value)
             else:
                 new_kwargs["window"] = ("general_hamming", 0.54)
         if "general_cosine" in self.torch_api:
-            a_value = str(kwargs.pop("a")).strip("()")
-            new_kwargs["window"] = ("general_cosine", eval(a_value))
+            new_kwargs["window"] = ("general_cosine", eval(kwargs.pop("a")))
         new_kwargs.update(kwargs)
         return GenericMatcher.generate_code(self, new_kwargs)
diff --git a/tests/test_signal_windows_exponential.py b/tests/test_signal_windows_exponential.py
index 577bab0f0..2c5e49d53 100644
--- a/tests/test_signal_windows_exponential.py
+++ b/tests/test_signal_windows_exponential.py
@@ -144,7 +144,7 @@ def test_case_13():
     pytorch_code = textwrap.dedent(
         """
         import torch
-        result = torch.signal.windows.exponential(10, tau=0.5, sym=False, dtype=torch.float64)
+        result = torch.signal.windows.exponential(10, sym=False, tau=0.5, dtype=torch.float64)
         """
     )
     obj.run(pytorch_code, ["result"])

From 2672ed4f631559108dda631d3776a99f965637ac Mon Sep 17 00:00:00 2001
From: enkilee
Date: Tue, 15 Oct 2024 15:24:50 +0800
Subject: [PATCH 50/53] fix

---
 paconvert/api_matcher.py | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py
index 969fbeb54..e2741d11a 100644
--- a/paconvert/api_matcher.py
+++ b/paconvert/api_matcher.py
@@ -526,23 +526,17 @@ def generate_code(self, kwargs):
             kwargs["fftbins"] = "not " + kwargs["sym"]
         if "exponential" in self.torch_api:
             if "tau" in kwargs:
-                new_kwargs["window"] = (
-                    "exponential",
-                    None,
-                    kwargs.pop("tau").strip("()"),
-                )
+                new_kwargs["window"] = ("exponential", None, eval(kwargs.pop("tau")))
             else:
                 new_kwargs["window"] = ("exponential", None, 1.0)
         if "gaussian" in self.torch_api:
             if "std" in kwargs:
-                std_value = float(kwargs.pop("std").strip("()"))
-                new_kwargs["window"] = ("gaussian", std_value)
+                new_kwargs["window"] = ("gaussian", eval(kwargs.pop("std")))
             else:
                 new_kwargs["window"] = ("gaussian", 1.0)
         if "general_hamming" in self.torch_api:
             if "alpha" in kwargs:
-                alpha_value = float(kwargs.pop("alpha").strip("()"))
-                new_kwargs["window"] = ("general_hamming", alpha_value)
+                new_kwargs["window"] = ("general_hamming", eval(kwargs.pop("alpha")))
             else:
                 new_kwargs["window"] = ("general_hamming", 0.54)
         if "general_cosine" in self.torch_api:

From cea82be248a781cf8bac4ad1a620dbcd94deb97f Mon Sep 17 00:00:00 2001
From: enkilee
Date: Tue, 15 Oct 2024 15:36:14 +0800
Subject: [PATCH 51/53] fix

---
 paconvert/api_matcher.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py
index e2741d11a..a6ef5a678 100644
--- a/paconvert/api_matcher.py
+++ b/paconvert/api_matcher.py
@@ -523,7 +523,7 @@ class SignalWindowsWatcher(BaseMatcher):
     def generate_code(self, kwargs):
         new_kwargs = {}
         if "sym" in kwargs:
-            kwargs["fftbins"] = "not " + kwargs["sym"]
+            kwargs["fftbins"] = "not " + kwargs.pop("sym")
         if "exponential" in self.torch_api:
             if "tau" in kwargs:
                 new_kwargs["window"] = ("exponential", None, eval(kwargs.pop("tau")))
             else:
                 new_kwargs["window"] = ("exponential", None, 1.0)
         if "gaussian" in self.torch_api:
             if "std" in kwargs:

From 874738fd48a9a7806db78a8811f0fa6296f0a364 Mon Sep 17 00:00:00 2001
From: enkilee
Date: Wed, 16 Oct 2024 13:19:59 +0800
Subject: [PATCH 52/53] fix

---
 paconvert/api_matcher.py                     | 12 +++--
 tests/test_signal_windows_blackman.py        | 24 +--------
 tests/test_signal_windows_cosine.py          | 22 +-------
 tests/test_signal_windows_exponential.py     | 27 ++++------
 tests/test_signal_windows_gaussian.py        | 57 ++++----------------
 tests/test_signal_windows_general_cosine.py  | 23 ++++----
 tests/test_signal_windows_general_hamming.py | 15 ++++++
 tests/test_signal_windows_hamming.py         | 26 ++-------
 tests/test_signal_windows_hann.py            | 22 +-------
 9 files changed, 61 insertions(+), 167 deletions(-)

diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py
index a6ef5a678..333d6a41f 100644
--- a/paconvert/api_matcher.py
+++ b/paconvert/api_matcher.py
@@ -526,21 +526,25 @@ def generate_code(self, kwargs):
             kwargs["fftbins"] = "not " + kwargs.pop("sym")
         if "exponential" in self.torch_api:
             if "tau" in kwargs:
-                new_kwargs["window"] = ("exponential", None, eval(kwargs.pop("tau")))
+                new_kwargs["window"] = "('exponential', None, {})".format(
+                    kwargs.pop("tau")
+                )
             else:
                 new_kwargs["window"] = ("exponential", None, 1.0)
         if "gaussian" in self.torch_api:
             if "std" in kwargs:
-                new_kwargs["window"] = ("gaussian", eval(kwargs.pop("std")))
+                new_kwargs["window"] = "('gaussian', {})".format(kwargs.pop("std"))
             else:
                 new_kwargs["window"] = ("gaussian", 1.0)
         if "general_hamming" in self.torch_api:
             if "alpha" in kwargs:
-                new_kwargs["window"] = ("general_hamming", eval(kwargs.pop("alpha")))
+                new_kwargs["window"] = "('general_hamming', {})".format(
+                    kwargs.pop("alpha")
+                )
             else:
                 new_kwargs["window"] = ("general_hamming", 0.54)
         if "general_cosine" in self.torch_api:
-            new_kwargs["window"] = ("general_cosine", eval(kwargs.pop("a")))
+            new_kwargs["window"] = "('general_cosine', {})".format(kwargs.pop("a"))
         new_kwargs.update(kwargs)
         return GenericMatcher.generate_code(self, new_kwargs)
diff --git a/tests/test_signal_windows_blackman.py b/tests/test_signal_windows_blackman.py
index e46ff430a..222b6333a 100644
--- a/tests/test_signal_windows_blackman.py
+++ b/tests/test_signal_windows_blackman.py
@@ -161,26 +161,6 @@ def test_case_14():
 
 
 def test_case_15():
-    pytorch_code = textwrap.dedent(
-        """
-        import torch
-        result = torch.signal.windows.blackman(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False)
-        """
-    )
-    obj.run(pytorch_code, ["result"])
-
-
-def test_case_16():
-    pytorch_code = textwrap.dedent(
-        """
-        import torch
-        result = torch.signal.windows.blackman(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False)
-        """
-    )
-    obj.run(pytorch_code, ["result"], atol=1e-05)
-
-
-def test_case_17():
     pytorch_code = textwrap.dedent(
         """
         import torch
@@ -191,13 +171,13 @@ def test_case_17():
     obj.run(pytorch_code, ["result"], atol=1e-05)
 
 
-def test_case_18():
+def test_case_16():
     pytorch_code = textwrap.dedent(
         """
         import torch
         a=False
         m=5
-        result = torch.signal.windows.blackman(M=m, sym=a, dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), requires_grad=False)
+        result = torch.signal.windows.blackman(dtype=torch.float32, M=m, sym=a, layout=torch.strided, device=torch.device('cpu'), requires_grad=False)
         """
     )
     obj.run(pytorch_code, ["result"], atol=1e-05)
diff --git a/tests/test_signal_windows_cosine.py b/tests/test_signal_windows_cosine.py
index d87597d04..31b4e7ec5 100644
--- a/tests/test_signal_windows_cosine.py
+++ b/tests/test_signal_windows_cosine.py
@@ -161,26 +161,6 @@ def test_case_14():
 
 
 def test_case_15():
-    pytorch_code = textwrap.dedent(
-        """
-        import torch
-        result = torch.signal.windows.cosine(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False)
-        """
-    )
-    obj.run(pytorch_code, ["result"])
-
-
-def test_case_16():
-    pytorch_code = textwrap.dedent(
-        """
-        import torch
-        result = torch.signal.windows.cosine(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False)
-        """
-    )
-    obj.run(pytorch_code, ["result"])
-
-
-def test_case_17():
     pytorch_code = textwrap.dedent(
         """
         import torch
@@ -191,7 +171,7 @@ def test_case_17():
     obj.run(pytorch_code, ["result"], atol=1e-05)
 
 
-def test_case_18():
+def test_case_16():
     pytorch_code = textwrap.dedent(
         """
         import torch
diff --git a/tests/test_signal_windows_exponential.py
b/tests/test_signal_windows_exponential.py index 2c5e49d53..8207bb32c 100644 --- a/tests/test_signal_windows_exponential.py +++ b/tests/test_signal_windows_exponential.py @@ -184,7 +184,7 @@ def test_case_17(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(requires_grad=False, sym=True, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.6) + result = torch.signal.windows.exponential(requires_grad=True, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.3) """ ) obj.run(pytorch_code, ["result"]) @@ -194,7 +194,9 @@ def test_case_18(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.exponential(requires_grad=True, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.3) + a=True + m=10 + result = torch.signal.windows.exponential(requires_grad=True, sym=a, M=m, layout=torch.strided, device=torch.device('cpu'), tau=0.3) """ ) obj.run(pytorch_code, ["result"]) @@ -204,20 +206,13 @@ def test_case_19(): pytorch_code = textwrap.dedent( """ import torch - a=False - result = torch.signal.windows.exponential(requires_grad=True, sym=a, M=10, layout=torch.strided, device=torch.device('cpu'), tau=0.3) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_20(): - pytorch_code = textwrap.dedent( - """ - import torch - a=False - m=10 - result = torch.signal.windows.exponential(requires_grad=True, sym=a, M=m, layout=torch.strided, device=torch.device('cpu'), tau=0.3) + a=10 + b=0.5 + c=False + d=torch.float64 + e=torch.device('cpu') + f=False + result = torch.signal.windows.exponential(M=a, tau=b, sym=c, dtype=d, layout=torch.strided, device=e, requires_grad=f) """ ) obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_gaussian.py b/tests/test_signal_windows_gaussian.py index c09e935e4..2587331b5 100644 --- a/tests/test_signal_windows_gaussian.py +++ b/tests/test_signal_windows_gaussian.py @@ -144,7 +144,7 @@ def test_case_13(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, std=0.6, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) """ ) obj.run(pytorch_code, ["result"]) @@ -154,7 +154,8 @@ def test_case_14(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) + a=True + result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=a, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"]) @@ -164,60 +165,24 @@ def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.gaussian(M=10, std=0.6, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_16(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.gaussian(M=10, std=0.6, sym=True, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_17(): - pytorch_code = textwrap.dedent( - """ - import torch - result = 
torch.signal.windows.gaussian(requires_grad=True, std=0.8, sym=False, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) + a=False + m=10 + result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=a, M=m, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) """ ) obj.run(pytorch_code, ["result"], atol=1e-05) -def test_case_18(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=True, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_19(): - pytorch_code = textwrap.dedent( - """ - import torch - a=True - result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=a, M=10, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_20(): +def test_case_16(): pytorch_code = textwrap.dedent( """ import torch a=False m=10 - result = torch.signal.windows.gaussian(requires_grad=False, std=0.8, sym=a, M=m, layout=torch.strided, device=torch.device('cpu'), dtype=torch.float32) + s=0.8 + d=torch.float32 + e=torch.device('cpu') + result = torch.signal.windows.gaussian(requires_grad=False, std=s, sym=a, M=m, layout=torch.strided, device=e, dtype=d) """ ) obj.run(pytorch_code, ["result"], atol=1e-05) diff --git a/tests/test_signal_windows_general_cosine.py b/tests/test_signal_windows_general_cosine.py index 4bca2233c..101fea8e2 100644 --- a/tests/test_signal_windows_general_cosine.py +++ b/tests/test_signal_windows_general_cosine.py @@ -154,7 +154,8 @@ def test_case_14(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(sym=False, requires_grad=False, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) + a=True + result = torch.signal.windows.general_cosine(sym=a, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"]) @@ -164,30 +165,24 @@ def test_case_15(): pytorch_code = textwrap.dedent( """ import torch - result = torch.signal.windows.general_cosine(sym=True, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) + s=False + m=10 + result = torch.signal.windows.general_cosine(sym=s, requires_grad=True, a=[0.5, 1 - 0.5], M=m, layout=torch.strided, device=torch.device('cpu')) """ ) obj.run(pytorch_code, ["result"]) def test_case_16(): - pytorch_code = textwrap.dedent( - """ - import torch - a=False - result = torch.signal.windows.general_cosine(sym=a, requires_grad=True, a=[0.5, 1 - 0.5], M=10, layout=torch.strided, device=torch.device('cpu')) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_17(): pytorch_code = textwrap.dedent( """ import torch s=False m=10 - result = torch.signal.windows.general_cosine(sym=s, requires_grad=True, a=[0.5, 1 - 0.5], M=m, layout=torch.strided, device=torch.device('cpu')) + c=[0.5, 1 - 0.5] + r=True + d=torch.device('cpu') + result = torch.signal.windows.general_cosine(sym=s, requires_grad=r, a=c, M=m, layout=torch.strided, device=d) """ ) obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_general_hamming.py b/tests/test_signal_windows_general_hamming.py index b5c78e344..2e193d8c2 100644 --- a/tests/test_signal_windows_general_hamming.py +++ b/tests/test_signal_windows_general_hamming.py @@ -191,3 +191,18 @@ def test_case_17(): """ ) 
obj.run(pytorch_code, ["result"]) + + +def test_case_18(): + pytorch_code = textwrap.dedent( + """ + import torch + a=False + m=10 + c=0.4 + e=torch.device('cpu') + g=False + result = torch.signal.windows.general_hamming(requires_grad=g, alpha=c, sym=a, M=m, layout=torch.strided, device=e) + """ + ) + obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hamming.py b/tests/test_signal_windows_hamming.py index 6d9e315d4..87751972a 100644 --- a/tests/test_signal_windows_hamming.py +++ b/tests/test_signal_windows_hamming.py @@ -151,16 +151,6 @@ def test_case_13(): def test_case_14(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.hamming(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=True) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_15(): pytorch_code = textwrap.dedent( """ import torch @@ -170,17 +160,7 @@ def test_case_15(): obj.run(pytorch_code, ["result"]) -def test_case_16(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.hamming(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_17(): +def test_case_15(): pytorch_code = textwrap.dedent( """ import torch @@ -191,13 +171,13 @@ def test_case_17(): obj.run(pytorch_code, ["result"]) -def test_case_18(): +def test_case_16(): pytorch_code = textwrap.dedent( """ import torch a=False m=5 - result = torch.signal.windows.hamming(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=m, sym=a, requires_grad=False) + result = torch.signal.windows.hamming(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=m, sym=a, requires_grad=True) """ ) obj.run(pytorch_code, ["result"]) diff --git a/tests/test_signal_windows_hann.py b/tests/test_signal_windows_hann.py index 3558de96d..d5101caad 100644 --- a/tests/test_signal_windows_hann.py +++ b/tests/test_signal_windows_hann.py @@ -161,26 +161,6 @@ def test_case_14(): def test_case_15(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.hann(M=5, sym=False, dtype=torch.float64, layout=torch.strided, device=torch.device('cpu'), requires_grad=False) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_16(): - pytorch_code = textwrap.dedent( - """ - import torch - result = torch.signal.windows.hann(dtype=torch.float32, layout=torch.strided, device=torch.device('cpu'), M=5, sym=False, requires_grad=False) - """ - ) - obj.run(pytorch_code, ["result"]) - - -def test_case_17(): pytorch_code = textwrap.dedent( """ import torch @@ -191,7 +171,7 @@ def test_case_17(): obj.run(pytorch_code, ["result"]) -def test_case_18(): +def test_case_16(): pytorch_code = textwrap.dedent( """ import torch From d79909e9840cd48111504b4fb305477499eb04a0 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 16 Oct 2024 14:38:56 +0800 Subject: [PATCH 53/53] fix --- paconvert/api_matcher.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/paconvert/api_matcher.py b/paconvert/api_matcher.py index 8a93463ab..10cbab96e 100644 --- a/paconvert/api_matcher.py +++ b/paconvert/api_matcher.py @@ -530,19 +530,19 @@ def generate_code(self, kwargs): kwargs.pop("tau") ) else: - new_kwargs["window"] = ("exponential", None, 1.0) + new_kwargs["window"] = "('exponential', None, 1.0)" if "gaussian" in self.torch_api: if "std" in kwargs: 
new_kwargs["window"] = "('gaussian', {})".format(kwargs.pop("std")) else: - new_kwargs["window"] = ("gaussian", 1.0) + new_kwargs["window"] = "('gaussian', 1.0)" if "general_hamming" in self.torch_api: if "alpha" in kwargs: new_kwargs["window"] = "('general_hamming', {})".format( kwargs.pop("alpha") ) else: - new_kwargs["window"] = ("general_hamming", 0.54) + new_kwargs["window"] = "('general_hamming', 0.54)" if "general_cosine" in self.torch_api: new_kwargs["window"] = "('general_cosine', {})".format(kwargs.pop("a")) new_kwargs.update(kwargs) @@ -862,7 +862,7 @@ def get_paddle_nodes(self, args, kwargs): else: shape = self.parse_args(args)[0] kwargs = {"shape": str(shape).replace("'", ""), **kwargs} - + if "dtype" not in kwargs: kwargs["dtype"] = "float32" @@ -871,7 +871,7 @@ def get_paddle_nodes(self, args, kwargs): if "high" not in kwargs: kwargs["high"] = 1 - + if "requires_grad" not in kwargs.keys(): API_TEMPLATE = textwrap.dedent( """ @@ -901,7 +901,7 @@ def get_paddle_nodes(self, args, kwargs): kwargs["device"], kwargs["requires_grad"], ) - + return ast.parse(code).body