diff --git a/docs/supported_ops.md b/docs/supported_ops.md
index 46869e88f20..c907921fc74 100644
--- a/docs/supported_ops.md
+++ b/docs/supported_ops.md
@@ -650,7 +650,7 @@ Accelerator supports are described below.
 S
 NS
 NS
-NS
+PS* (missing nested BINARY, CALENDAR, ARRAY, MAP, STRUCT, UDT)
 NS
 NS
 NS
@@ -673,7 +673,7 @@ Accelerator supports are described below.
 S
 NS
 NS
-NS
+PS* (missing nested BINARY, CALENDAR, ARRAY, MAP, STRUCT, UDT)
 NS
 NS
 NS
diff --git a/integration_tests/src/main/python/join_test.py b/integration_tests/src/main/python/join_test.py
index 28f9f90305a..226ccd034ea 100644
--- a/integration_tests/src/main/python/join_test.py
+++ b/integration_tests/src/main/python/join_test.py
@@ -206,7 +206,7 @@ def do_join(spark):
 # local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
 # After 3.1.0 is the min spark version we can drop this
 @ignore_order(local=True)
-@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
+@pytest.mark.parametrize('data_gen', all_gen + single_level_array_gens, ids=idfn)
 @pytest.mark.parametrize('batch_size', ['100', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
 def test_cartesean_join(data_gen, batch_size):
     def do_join(spark):
@@ -221,7 +221,7 @@ def do_join(spark):
 @ignore_order(local=True)
 @pytest.mark.xfail(condition=is_databricks_runtime(),
     reason='https://github.com/NVIDIA/spark-rapids/issues/334')
-@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
+@pytest.mark.parametrize('data_gen', all_gen + single_level_array_gens, ids=idfn)
 @pytest.mark.parametrize('batch_size', ['100', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
 def test_cartesean_join_special_case_count(data_gen, batch_size):
     def do_join(spark):
@@ -249,7 +249,7 @@ def do_join(spark):
 # local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
 # After 3.1.0 is the min spark version we can drop this
 @ignore_order(local=True)
-@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
+@pytest.mark.parametrize('data_gen', all_gen + single_level_array_gens, ids=idfn)
 @pytest.mark.parametrize('batch_size', ['100', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
 def test_broadcast_nested_loop_join(data_gen, batch_size):
     def do_join(spark):
@@ -262,7 +262,7 @@ def do_join(spark):
 # local sort because of https://github.com/NVIDIA/spark-rapids/issues/84
 # After 3.1.0 is the min spark version we can drop this
 @ignore_order(local=True)
-@pytest.mark.parametrize('data_gen', all_gen, ids=idfn)
+@pytest.mark.parametrize('data_gen', all_gen + single_level_array_gens, ids=idfn)
 @pytest.mark.parametrize('batch_size', ['100', '1g'], ids=idfn) # set the batch size so we can test multiple stream batches
 def test_broadcast_nested_loop_join_special_case_count(data_gen, batch_size):
     def do_join(spark):
diff --git a/sql-plugin/src/main/scala/com/nvidia/spark/rapids/GpuOverrides.scala b/sql-plugin/src/main/scala/com/nvidia/spark/rapids/GpuOverrides.scala
index e760c075a2b..a03ea67723b 100644
--- a/sql-plugin/src/main/scala/com/nvidia/spark/rapids/GpuOverrides.scala
+++ b/sql-plugin/src/main/scala/com/nvidia/spark/rapids/GpuOverrides.scala
@@ -2962,11 +2962,15 @@ object GpuOverrides {
       (exchange, conf, p, r) => new GpuBroadcastMeta(exchange, conf, p, r)),
     exec[BroadcastNestedLoopJoinExec](
       "Implementation of join using brute force",
-      ExecChecks(TypeSig.commonCudfTypes + TypeSig.NULL + TypeSig.DECIMAL, TypeSig.all),
+      ExecChecks(TypeSig.commonCudfTypes + TypeSig.NULL + TypeSig.DECIMAL +
+        TypeSig.ARRAY.nested(TypeSig.commonCudfTypes + TypeSig.NULL + TypeSig.DECIMAL),
+        TypeSig.all),
       (join, conf, p, r) => new GpuBroadcastNestedLoopJoinMeta(join, conf, p, r)),
     exec[CartesianProductExec](
       "Implementation of join using brute force",
-      ExecChecks(TypeSig.commonCudfTypes + TypeSig.NULL + TypeSig.DECIMAL, TypeSig.all),
+      ExecChecks(TypeSig.commonCudfTypes + TypeSig.NULL + TypeSig.DECIMAL +
+        TypeSig.ARRAY.nested(TypeSig.commonCudfTypes + TypeSig.NULL + TypeSig.DECIMAL),
+        TypeSig.all),
       (join, conf, p, r) => new SparkPlanMeta[CartesianProductExec](join, conf, p, r) {
         val condition: Option[BaseExprMeta[_]] =
           join.condition.map(GpuOverrides.wrapExpr(_, conf, Some(this)))
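
A minimal sketch of the kind of case the new single_level_array_gens coverage exercises, mirroring the test_cartesean_join pattern above with data_gen pinned to an array generator. This is illustrative only and not part of the patch: the helper names (assert_gpu_and_cpu_are_equal_collect, binary_op_df, ArrayGen, int_gen, ignore_order) come from the repo's integration-test harness, while the test name itself is hypothetical.

# Illustrative only -- a single concrete instance of the parametrized tests
# above, with data_gen fixed to an array<int> generator.
from asserts import assert_gpu_and_cpu_are_equal_collect
from data_gen import ArrayGen, int_gen, binary_op_df
from marks import ignore_order

@ignore_order(local=True)
def test_cartesean_join_array_sketch():
    def do_join(spark):
        # 50-row and 25-row frames whose 'a' and 'b' columns are array<int>
        left = binary_op_df(spark, ArrayGen(int_gen), length=50)
        right = binary_op_df(spark, ArrayGen(int_gen), length=25) \
            .withColumnRenamed('a', 'r_a').withColumnRenamed('b', 'r_b')
        return left.crossJoin(right)
    # CartesianProductExec is disabled by default, so enable it explicitly
    assert_gpu_and_cpu_are_equal_collect(do_join,
        conf={'spark.rapids.sql.exec.CartesianProductExec': 'true'})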