diff --git a/test/ir/inference/test_trt_convert_bitwise_or.py b/test/ir/inference/test_trt_convert_bitwise_or.py
index 469ecbb5371bb..2916f687344a0 100644
--- a/test/ir/inference/test_trt_convert_bitwise_or.py
+++ b/test/ir/inference/test_trt_convert_bitwise_or.py
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import List
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -70,7 +71,7 @@ def generate_input(batch):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.dims == 4:
                 self.dynamic_shape.min_input_shape = {
diff --git a/test/ir/inference/test_trt_convert_bmm.py b/test/ir/inference/test_trt_convert_bmm.py
index 804d1fdb5f0c2..2677fac1bbeca 100644
--- a/test/ir/inference/test_trt_convert_bmm.py
+++ b/test/ir/inference/test_trt_convert_bmm.py
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import os
 import unittest
 from functools import partial
-from typing import List
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -66,7 +67,7 @@ def generate_input(shape):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             self.dynamic_shape.min_input_shape = {
                 "input1_data": [10, 350, 75],
diff --git a/test/ir/inference/test_trt_convert_cast.py b/test/ir/inference/test_trt_convert_cast.py
index b6785018d9d01..7e489f66bbebc 100644
--- a/test/ir/inference/test_trt_convert_cast.py
+++ b/test/ir/inference/test_trt_convert_cast.py
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import List
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -103,7 +104,7 @@ def generate_input(type):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.dims == 0:
                 self.dynamic_shape.min_input_shape = {"input_data": []}
diff --git a/test/ir/inference/test_trt_convert_clip.py b/test/ir/inference/test_trt_convert_clip.py
index d8aab6baf9e5f..730eb13e6f78e 100644
--- a/test/ir/inference/test_trt_convert_clip.py
+++ b/test/ir/inference/test_trt_convert_clip.py
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import Any, Dict, List
+from typing import Any
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -28,7 +30,7 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True
 
     def sample_program_configs(self):
-        def generate_input1(dims, batch, dtype, attrs: List[Dict[str, Any]]):
+        def generate_input1(dims, batch, dtype, attrs: list[dict[str, Any]]):
             if dims == 0:
                 return np.ones([]).astype(dtype)
             elif dims == 1:
@@ -40,10 +42,10 @@ def generate_input1(dims, batch, dtype, attrs: List[Dict[str, Any]]):
             else:
                 return np.ones([batch, 3, 32, 32]).astype(dtype)
 
-        def generate_weight1(attrs: List[Dict[str, Any]]):
+        def generate_weight1(attrs: list[dict[str, Any]]):
             return np.array([np.random.uniform(1, 10)]).astype("float32")
 
-        def generate_weight2(attrs: List[Dict[str, Any]]):
+        def generate_weight2(attrs: list[dict[str, Any]]):
             return np.array([np.random.uniform(10, 20)]).astype("float32")
 
         for dims in [0, 1, 2, 3, 4]:
diff --git a/test/ir/inference/test_trt_convert_compare_and_logical.py b/test/ir/inference/test_trt_convert_compare_and_logical.py
index b53d2b4c5e234..4b08b93bff5bc 100755
--- a/test/ir/inference/test_trt_convert_compare_and_logical.py
+++ b/test/ir/inference/test_trt_convert_compare_and_logical.py
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import List
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -91,7 +92,7 @@ def generate_input(shape):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.dims == 2:
                 self.dynamic_shape.min_input_shape = {
@@ -237,7 +238,7 @@ def generate_input(shape):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.dims == 2:
                 self.dynamic_shape.min_input_shape = {
@@ -398,7 +399,7 @@ def generate_input(shape):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.dims == 2:
                 self.dynamic_shape.min_input_shape = {
@@ -561,7 +562,7 @@ def generate_input(shape):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.dims == 2:
                 self.dynamic_shape.min_input_shape = {
@@ -726,7 +727,7 @@ def generate_input(shape):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.dims == 2:
                 shape_data = [2, 16]
diff --git a/test/ir/inference/test_trt_convert_concat.py b/test/ir/inference/test_trt_convert_concat.py
index 3a7ff35860da4..fe8321e6955b0 100644
--- a/test/ir/inference/test_trt_convert_concat.py
+++ b/test/ir/inference/test_trt_convert_concat.py
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import Any, Dict, List
+from typing import Any
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -39,7 +41,7 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
         return True
 
     def sample_program_configs(self):
-        def generate_input1(attrs: List[Dict[str, Any]], batch):
+        def generate_input1(attrs: list[dict[str, Any]], batch):
             if self.dims == 4:
                 return np.ones([batch, 3, 24, 24]).astype(np.float32)
             elif self.dims == 3:
@@ -49,7 +51,7 @@ def generate_input1(attrs: List[Dict[str, Any]], batch):
             elif self.dims == 1:
                 return np.ones([24]).astype(np.float32)
 
-        def generate_input2(attrs: List[Dict[str, Any]], batch):
+        def generate_input2(attrs: list[dict[str, Any]], batch):
             if self.dims == 4:
                 return np.ones([batch, 3, 24, 24]).astype(np.float32)
             elif self.dims == 3:
@@ -59,7 +61,7 @@ def generate_input2(attrs: List[Dict[str, Any]], batch):
             elif self.dims == 1:
                 return np.ones([24]).astype(np.float32)
 
-        def generate_input3(attrs: List[Dict[str, Any]], batch):
+        def generate_input3(attrs: list[dict[str, Any]], batch):
             if self.dims == 4:
                 return np.ones([batch, 3, 24, 24]).astype(np.float32)
             elif self.dims == 3:
@@ -69,7 +71,7 @@ def generate_input3(attrs: List[Dict[str, Any]], batch):
             elif self.dims == 1:
                 return np.ones([24]).astype(np.float32)
 
-        def generate_weight1(attrs: List[Dict[str, Any]]):
+        def generate_weight1(attrs: list[dict[str, Any]]):
             return np.zeros([1]).astype(np.int32)
 
         for dims in [2, 3, 4]:
@@ -155,7 +157,7 @@ def generate_weight1(attrs: List[Dict[str, Any]]):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.num_input == 0:
                 if self.dims == 4:
diff --git a/test/ir/inference/test_trt_convert_conv2d.py b/test/ir/inference/test_trt_convert_conv2d.py
index 3fa99a078ddd7..7a034c9ad6c3c 100644
--- a/test/ir/inference/test_trt_convert_conv2d.py
+++ b/test/ir/inference/test_trt_convert_conv2d.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import itertools
 import unittest
 from functools import partial
-from typing import Any, Dict, List
+from typing import Any
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -50,7 +52,7 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
     def sample_program_configs(self):
         self.trt_param.workspace_size = 1073741824
 
-        def generate_input1(batch, attrs: List[Dict[str, Any]]):
+        def generate_input1(batch, attrs: list[dict[str, Any]]):
             return (
                 np.ones([batch, attrs[0]['groups'] * 3, 64, 64]).astype(
                     np.float32
@@ -58,7 +60,7 @@ def generate_input1(batch, attrs: List[Dict[str, Any]]):
                 / 4
             )
 
-        def generate_weight1(attrs: List[Dict[str, Any]]):
+        def generate_weight1(attrs: list[dict[str, Any]]):
             return np.random.random([9, 3, 3, 3]).astype(np.float32) - 0.5
 
         batch_options = [1, 2]
@@ -139,7 +141,7 @@ def generate_weight1(attrs: List[Dict[str, Any]]):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             input_groups = attrs[0]['groups'] * 3
             self.dynamic_shape.min_input_shape = {
@@ -233,13 +235,13 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
     def sample_program_configs(self):
         self.trt_param.workspace_size = 1073741824
 
-        def generate_input1(attrs: List[Dict[str, Any]]):
+        def generate_input1(attrs: list[dict[str, Any]]):
             return (
                 np.random.random(attrs[0]['input_shape']).astype(np.float32)
                 - 0.5
             )
 
-        def generate_data(attrs: List[Dict[str, Any]]):
+        def generate_data(attrs: list[dict[str, Any]]):
             return (
                 np.random.random(attrs[0]['weight_shape']).astype(np.float32)
                 - 0.5
@@ -330,7 +332,7 @@ def generate_data(attrs: List[Dict[str, Any]]):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def clear_dynamic_shape():
             self.dynamic_shape.min_input_shape = {}
             self.dynamic_shape.max_input_shape = {}
diff --git a/test/ir/inference/test_trt_convert_conv2d_transpose.py b/test/ir/inference/test_trt_convert_conv2d_transpose.py
index c493377ebf976..b15302a60e727 100644
--- a/test/ir/inference/test_trt_convert_conv2d_transpose.py
+++ b/test/ir/inference/test_trt_convert_conv2d_transpose.py
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import Any, Dict, List
+from typing import Any
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -52,10 +54,10 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
     def sample_program_configs(self):
         self.trt_param.workspace_size = 1073741824
 
-        def generate_input1(batch, num_channels, attrs: List[Dict[str, Any]]):
+        def generate_input1(batch, num_channels, attrs: list[dict[str, Any]]):
             return np.ones([batch, num_channels, 64, 64]).astype(np.float32)
 
-        def generate_weight1(num_channels, attrs: List[Dict[str, Any]]):
+        def generate_weight1(num_channels, attrs: list[dict[str, Any]]):
             if attrs[0]['groups'] == 1:
                 return np.random.random(
                     [num_channels, num_channels, 3, 3]
@@ -136,7 +138,7 @@ def generate_weight1(num_channels, attrs: List[Dict[str, Any]]):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             if self.num_channels == 2:
                 self.dynamic_shape.min_input_shape = {
@@ -254,12 +256,12 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
     def sample_program_configs(self):
         self.trt_param.workspace_size = 1073741824
 
-        def generate_input1(batch, num_channels, attrs: List[Dict[str, Any]]):
+        def generate_input1(batch, num_channels, attrs: list[dict[str, Any]]):
             return (
                 np.ones([batch, num_channels, 20, 30]).astype(np.float32) / 100
             )
 
-        def generate_weight1(num_channels, attrs: List[Dict[str, Any]]):
+        def generate_weight1(num_channels, attrs: list[dict[str, Any]]):
             return (
                 np.random.random([num_channels, 64, 3, 3]).astype(np.float32)
                 / 100
@@ -314,7 +316,7 @@ def generate_weight1(num_channels, attrs: List[Dict[str, Any]]):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             self.dynamic_shape.min_input_shape = {
                 "input_data": [1, 128, 20, 30],
diff --git a/test/ir/inference/test_trt_convert_conv3d_transpose.py b/test/ir/inference/test_trt_convert_conv3d_transpose.py
index b2d15d3643f57..097ce3aa4ff21 100644
--- a/test/ir/inference/test_trt_convert_conv3d_transpose.py
+++ b/test/ir/inference/test_trt_convert_conv3d_transpose.py
@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import Any, Dict, List
+from typing import Any
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -34,10 +36,10 @@ def is_program_valid(self, program_config: ProgramConfig) -> bool:
     def sample_program_configs(self):
         self.trt_param.workspace_size = 1073741824
 
-        def generate_input1(batch, num_channels, attrs: List[Dict[str, Any]]):
+        def generate_input1(batch, num_channels, attrs: list[dict[str, Any]]):
             return np.ones([batch, num_channels, 4, 20, 30]).astype(np.float32)
 
-        def generate_weight1(num_channels, attrs: List[Dict[str, Any]]):
+        def generate_weight1(num_channels, attrs: list[dict[str, Any]]):
             return np.random.random([num_channels, 64, 3, 3, 3]).astype(
                 np.float32
             )
@@ -91,7 +93,7 @@ def generate_weight1(num_channels, attrs: List[Dict[str, Any]]):
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             self.dynamic_shape.min_input_shape = {
                 "input_data": [1, 128, 4, 20, 30],
diff --git a/test/ir/inference/test_trt_convert_cross_multihead_matmul.py b/test/ir/inference/test_trt_convert_cross_multihead_matmul.py
index a8ce9ee0a59e9..40845d60acc42 100644
--- a/test/ir/inference/test_trt_convert_cross_multihead_matmul.py
+++ b/test/ir/inference/test_trt_convert_cross_multihead_matmul.py
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import annotations
+
 import unittest
 from functools import partial
-from typing import List
 
 import numpy as np
 from program_config import ProgramConfig, TensorConfig
@@ -242,7 +243,7 @@ def generate_weight2():
 
     def sample_predictor_configs(
         self, program_config
-    ) -> (paddle_infer.Config, List[int], float):
+    ) -> tuple[paddle_infer.Config, list[int], float]:
         def generate_dynamic_shape(attrs):
             # The last dim of input1 and input2 should be static.
             self.dynamic_shape.min_input_shape = {