From 4b24411e8cf50b076179fc8e8f5a080da5971dbd Mon Sep 17 00:00:00 2001
From: rewang
Date: Wed, 6 Mar 2024 09:13:40 +0000
Subject: [PATCH 1/4] Manually release predictor_tuned

Signed-off-by: rewang
---
 test/cpp/inference/api/trt_dynamic_shape_test.cc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/cpp/inference/api/trt_dynamic_shape_test.cc b/test/cpp/inference/api/trt_dynamic_shape_test.cc
index bbfdc0a2cd228..c6f6f8b16d358 100644
--- a/test/cpp/inference/api/trt_dynamic_shape_test.cc
+++ b/test/cpp/inference/api/trt_dynamic_shape_test.cc
@@ -191,6 +191,7 @@ void TestTunedDynamic() {
     output_t->copy_to_cpu(out_data.data());
   };
   check_func(predictor_tuned.get());
+  predictor_tuned.reset(nullptr);
 
   // check tuned_dynamic_shape
   AnalysisConfig config;

From 64293d961bf194d959745bed755ed295d935809a Mon Sep 17 00:00:00 2001
From: rewang
Date: Wed, 6 Mar 2024 09:46:54 +0000
Subject: [PATCH 2/4] Add indices to no_cast_list to keep it as fp32

Signed-off-by: rewang
---
 test/ir/inference/test_trt_convert_lookup_table.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/ir/inference/test_trt_convert_lookup_table.py b/test/ir/inference/test_trt_convert_lookup_table.py
index e1fb64bcdf545..b7cf7d657d7a0 100644
--- a/test/ir/inference/test_trt_convert_lookup_table.py
+++ b/test/ir/inference/test_trt_convert_lookup_table.py
@@ -80,6 +80,7 @@ def generate_input2(dims, attrs: List[Dict[str, Any]]):
                 )
             },
             outputs=["out_data"],
+            no_cast_list=["indices"],
         )
 
         yield program_config

From eb0db308246866312df35826ab154931ada3f300 Mon Sep 17 00:00:00 2001
From: rewang
Date: Wed, 6 Mar 2024 09:47:58 +0000
Subject: [PATCH 3/4] Set both atol and rtol for the fp16 test_trt_convert_solve

Signed-off-by: rewang
---
 test/ir/inference/test_trt_convert_solve.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/ir/inference/test_trt_convert_solve.py b/test/ir/inference/test_trt_convert_solve.py
index c3117ee335740..f12fb453a48f6 100644
--- a/test/ir/inference/test_trt_convert_solve.py
+++ b/test/ir/inference/test_trt_convert_solve.py
@@ -89,7 +89,7 @@ def clear_dynamic_shape():
         self.trt_param.precision = paddle_infer.PrecisionType.Float32
         yield self.create_inference_config(), (1, 3), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
-        yield self.create_inference_config(), (1, 3), 1e-3
+        yield self.create_inference_config(), (1, 3), (1e-3, 1e-3)
 
     def test(self):
         self.run_test()

From 3cdd7f779a427f0b09d994796292e29d7c994f56 Mon Sep 17 00:00:00 2001
From: Ryan Jeng
Date: Mon, 19 Feb 2024 18:32:43 -0800
Subject: [PATCH 4/4] Merge branch 'rewang/fix_test_sparse_fused_attention_seed' into 'nv-2.6.0'

Fix test_sparse_fused_attention random seed

See merge request dl/paddle/paddle!312
---
 test/legacy_test/test_sparse_fused_attention_op.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/legacy_test/test_sparse_fused_attention_op.py b/test/legacy_test/test_sparse_fused_attention_op.py
index 68cdd16d4bd12..098f4815b85f3 100644
--- a/test/legacy_test/test_sparse_fused_attention_op.py
+++ b/test/legacy_test/test_sparse_fused_attention_op.py
@@ -42,6 +42,7 @@ def get_cuda_version():
 )
 class TestSparseAttentionAPI1(unittest.TestCase):
     def setUp(self):
+        paddle.seed(0)
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 128
@@ -134,6 +135,7 @@ def test_dygraph(self):
 
 class TestSparseAttentionAPI2(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 128
@@ -144,6 +146,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI3(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
@@ -154,6 +157,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI4(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512
@@ -164,6 +168,7 @@ def setUp(self):
 
 class TestSparseAttentionAPI5(TestSparseAttentionAPI1):
     def setUp(self):
+        super().setUp()
         self.batch_size = 16
         self.num_heads = 16
         self.seq_len = 512