diff --git a/src/frontends/paddle/src/op/pool3d.cpp b/src/frontends/paddle/src/op/pool3d.cpp
new file mode 100644
index 00000000000000..8cde73fa23e911
--- /dev/null
+++ b/src/frontends/paddle/src/op/pool3d.cpp
@@ -0,0 +1,328 @@
+//*****************************************************************************
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+//*****************************************************************************
+
+// NOTE(review): the mangled patch showed a bare `#include`; <limits.h> matches
+// the sibling pool2d.cpp convention — confirm against the original commit.
+#include <algorithm>  // std::any_of
+#include <limits.h>
+
+#include "default_opset.hpp"
+#include "openvino/frontend/paddle/node_context.hpp"
+
+namespace ov {
+namespace frontend {
+namespace paddle {
+namespace op {
+// Helper: translate Paddle "padding_algorithm"/"paddings" attributes into
+// OpenVINO pad_begin/pad_end shapes and an auto_pad mode.
+static void get_paddings(const NodeContext& node,
+                         ov::Shape& pad_begin,
+                         ov::Shape& pad_end,
+                         ov::op::PadType& auto_pad,
+                         std::string& data_format) {
+    if (node.has_attribute("padding_algorithm")) {
+        auto pad_algo = node.get_attribute<std::string>("padding_algorithm");
+        if (pad_algo == "SAME") {
+            auto_pad = ov::op::PadType::SAME_UPPER;
+        } else if (pad_algo == "VALID") {
+            auto_pad = ov::op::PadType::VALID;
+        } else if (pad_algo == "EXPLICIT") {
+            auto_pad = ov::op::PadType::EXPLICIT;
+        } else {
+            throw std::runtime_error("Unsupported pooling padding_algorithm " + pad_algo);
+        }
+    } else {
+        // adaptive_maxpool with no such attr.
+        auto_pad = ov::op::PadType::EXPLICIT;
+    }
+
+    /*If pool padding size is a tuple or list, it could be in three forms:
+    [pad_depth, pad_height, pad_width] or [pad_depth_front, pad_depth_back,
+    pad_height_top, pad_height_bottom, pad_width_left, pad_width_right],
+    and when data_format is “NCDHW”, pool_padding can
+    be in the form [[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top,
+    pad_height_bottom], [pad_width_left, pad_width_right]]. when
+    data_format is “NDHWC”, pool_padding can be in the form
+    [[0,0], [pad_depth_front, pad_depth_back], [pad_height_top,
+    pad_height_bottom], [pad_width_left, pad_width_right], [0,0]].
+    Otherwise, the pool padding size will be a square of an int.*/
+    auto paddings = node.get_attribute<std::vector<int32_t>>("paddings");
+
+    switch (paddings.size()) {
+    case 3:
+        // Symmetric padding: one value per spatial dim (D, H, W).
+        pad_begin =
+            Shape{static_cast<size_t>(paddings[0]), static_cast<size_t>(paddings[1]), static_cast<size_t>(paddings[2])};
+        pad_end = pad_begin;
+        break;
+    case 6:
+        // Explicit begin/end pairs: [front, back, top, bottom, left, right].
+        pad_begin =
+            Shape{static_cast<size_t>(paddings[0]), static_cast<size_t>(paddings[2]), static_cast<size_t>(paddings[4])};
+        pad_end = Shape{
+            static_cast<size_t>(paddings[1]),
+            static_cast<size_t>(paddings[3]),
+            static_cast<size_t>(paddings[5]),
+        };
+        break;
+    default:
+        throw std::runtime_error("Unsupported pooling paddings " + std::to_string(paddings.size()));
+    }
+}
+
+// Convert Paddle pool3d (max/avg, plain/global/adaptive) to OpenVINO ops.
+NamedOutputs pool3d(const NodeContext& node) {
+    auto data = node.get_input("X");
+
+    auto pooling_type = node.get_attribute<std::string>("pooling_type", {});
+    auto global_pooling = node.get_attribute<bool>("global_pooling");
+    auto adaptive = node.get_attribute<bool>("adaptive");
+    auto kernel_shape = node.get_attribute<std::vector<int32_t>>("ksize");
+
+    auto rounding_type =
+        node.get_attribute<bool>("ceil_mode", false) ? ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR;
+
+    if (pooling_type.empty()) {
+        pooling_type = "max";
+    }
+
+    PADDLE_OP_CHECK(node, (pooling_type == "max") || (pooling_type == "avg"), "pool3d: not supported pooling type !");
+    PADDLE_OP_CHECK(node, kernel_shape.size() == 1 || kernel_shape.size() == 3, "pool3d: ksize must be 1 or 3!");
+
+    PartialShape input_shape = data.get_partial_shape();
+
+    int32_t input_rank = static_cast<int32_t>(input_shape.rank().get_length());
+    PADDLE_OP_CHECK(node, input_rank >= 2, "input tensor rank must be greater than 2");
+
+    auto auto_pad = ov::op::PadType::EXPLICIT;
+    ov::Shape pad_begin, pad_end;
+    std::string data_format = node.get_attribute<std::string>("data_format", "NCDHW");
+
+    get_paddings(node, pad_begin, pad_end, auto_pad, data_format);
+
+    if (data_format == "NDHWC") {
+        // Convert to the canonical NCDHW layout before pooling.
+        data = std::make_shared<default_opset::Transpose>(
+            data,
+            std::make_shared<default_opset::Constant>(ov::element::i64, Shape{5}, std::vector<int64_t>{0, 4, 1, 2, 3}));
+        input_shape = data.get_partial_shape();
+    }
+
+    std::vector<Output<ov::Node>> pool_outputs;
+    if (global_pooling || (adaptive && std::any_of(kernel_shape.begin(), kernel_shape.end(), [](int32_t i) {
+                               return i == 1;
+                           }))) {
+        // Global (or adaptive-to-1) pooling reduces over the 3 spatial axes.
+        if (pooling_type == "max") {
+            auto axes = default_opset::Constant::create(ov::element::i64,
+                                                        {3},
+                                                        {input_rank - 3, input_rank - 2, input_rank - 1});
+            pool_outputs = std::make_shared<default_opset::ReduceMax>(data, axes, true)->outputs();
+        } else {
+            auto axes = default_opset::Constant::create(ov::element::i64,
+                                                        {3},
+                                                        {input_rank - 3, input_rank - 2, input_rank - 1});
+            pool_outputs = std::make_shared<default_opset::ReduceMean>(data, axes, true)->outputs();
+        }
+    } else if (adaptive) {
+        auto pool_size = std::vector<int64_t>(3, 0);
+
+        if (kernel_shape.size() == 1) {
+            // Not tested: implemented according to spec, but can't generate real
+            // model to test
+            pool_size[0] = pool_size[1] = pool_size[2] = kernel_shape[0];
+        } else {
+            pool_size[0] = kernel_shape[0];
+            pool_size[1] = kernel_shape[1];
+            pool_size[2] = kernel_shape[2];
+        }
+
+        const Output<ov::Node> output_shape =
+            default_opset::Constant::create(ov::element::i64, {pool_size.size()}, pool_size);
+
+        if (pooling_type == "max") {
+            pool_outputs =
+                std::make_shared<default_opset::AdaptiveMaxPool>(data, output_shape, ov::element::i32)->outputs();
+        } else {
+            pool_outputs = std::make_shared<default_opset::AdaptiveAvgPool>(data, output_shape)->outputs();
+        }
+    } else {
+        auto strides = node.get_attribute<std::vector<int32_t>>("strides");
+
+        size_t kernel_d, kernel_h, kernel_w;
+        if (kernel_shape.size() == 1) {
+            // Not tested: implemented according to spec, but can't generate real
+            // model to test
+            kernel_d = kernel_h = kernel_w = kernel_shape[0];
+        } else {
+            kernel_d = kernel_shape[0];
+            kernel_h = kernel_shape[1];
+            kernel_w = kernel_shape[2];
+        }
+
+        PADDLE_OP_CHECK(node,
+                        kernel_d > 0 && kernel_h > 0 && kernel_w > 0,
+                        "pool3d kernel shape must be greater than 0");
+
+        // Note: this shape check is only valid when the spatial dim of input_shape
+        // is static. Clamp the kernel to the padded input extent, mirroring
+        // Paddle's behavior when the kernel overhangs the input.
+        if (input_shape[2].is_static() && input_shape[3].is_static() && input_shape[4].is_static()) {
+            uint64_t input_d = input_shape[input_rank - 3].get_length();
+            uint64_t input_h = input_shape[input_rank - 2].get_length();
+            uint64_t input_w = input_shape[input_rank - 1].get_length();
+            if ((input_d > 0) && (input_d + pad_begin[0] + pad_end[0] < kernel_d)) {
+                kernel_d = input_d + pad_begin[0] + pad_end[0];
+            }
+            if ((input_h > 0) && (input_h + pad_begin[1] + pad_end[1] < kernel_h)) {
+                kernel_h = input_h + pad_begin[1] + pad_end[1];
+            }
+            if ((input_w > 0) && (input_w + pad_begin[2] + pad_end[2] < kernel_w)) {
+                kernel_w = input_w + pad_begin[2] + pad_end[2];
+            }
+        }
+
+        if (pooling_type == "max") {
+            pool_outputs = std::make_shared<default_opset::MaxPool>(data,
+                                                                    ov::Strides(strides.begin(), strides.end()),
+                                                                    ov::Strides{1, 1, 1},
+                                                                    pad_begin,
+                                                                    pad_end,
+                                                                    ov::Shape{kernel_d, kernel_h, kernel_w},
+                                                                    rounding_type,
+                                                                    auto_pad,
+                                                                    ov::element::i32,
+                                                                    2)
+                               ->outputs();
+        } else {
+            bool exclude_pad = node.get_attribute<bool>("exclusive", false);
+            pool_outputs = std::make_shared<default_opset::AvgPool>(data,
+                                                                    ov::Strides(strides.begin(), strides.end()),
+                                                                    pad_begin,
+                                                                    pad_end,
+                                                                    ov::Shape{kernel_d, kernel_h, kernel_w},
+                                                                    exclude_pad,
+                                                                    rounding_type,
+                                                                    auto_pad)
+                               ->outputs();
+        }
+    }
+
+    if (data_format == "NDHWC") {
+        // Restore the caller's NDHWC layout.
+        pool_outputs[0] = std::make_shared<default_opset::Transpose>(
+            pool_outputs[0],
+            std::make_shared<default_opset::Constant>(ov::element::i64, Shape{5}, std::vector<int64_t>{0, 2, 3, 4, 1}));
+    }
+
+    return NamedOutputs{{"Out", {pool_outputs[0]}}};
+}
+
+// Convert Paddle max_pool3d_with_index: always max pooling, emits both the
+// pooled values ("Out") and the flattened argmax indices ("Mask").
+NamedOutputs pool3d_with_index(const NodeContext& node) {
+    auto data = node.get_input("X");
+    auto pooling_type = node.get_attribute<std::string>("pooling_type", {});
+    auto adaptive = node.get_attribute<bool>("adaptive");
+    auto kernel_shape = node.get_attribute<std::vector<int32_t>>("ksize");
+
+    auto rounding_type =
+        node.get_attribute<bool>("ceil_mode", false) ? ov::op::RoundingType::CEIL : ov::op::RoundingType::FLOOR;
+
+    if (pooling_type.empty()) {
+        pooling_type = "max";
+    }
+
+    PADDLE_OP_CHECK(node, (pooling_type == "max") || (pooling_type == "avg"), "pool3d: not supported pooling type !");
+    PADDLE_OP_CHECK(node, kernel_shape.size() == 1 || kernel_shape.size() == 3, "pool3d: ksize must be 1 or 3!");
+
+    PartialShape input_shape = data.get_partial_shape();
+
+    int32_t input_rank = static_cast<int32_t>(input_shape.rank().get_length());
+    PADDLE_OP_CHECK(node, input_rank >= 2, "input tensor rank must be greater than 2");
+
+    auto auto_pad = ov::op::PadType::EXPLICIT;
+    ov::Shape pad_begin, pad_end;
+    std::string data_format = node.get_attribute<std::string>("data_format", "NCDHW");
+
+    get_paddings(node, pad_begin, pad_end, auto_pad, data_format);
+
+    if (data_format == "NDHWC") {
+        // Convert to the canonical NCDHW layout before pooling.
+        data = std::make_shared<default_opset::Transpose>(
+            data,
+            std::make_shared<default_opset::Constant>(ov::element::i64, Shape{5}, std::vector<int64_t>{0, 4, 1, 2, 3}));
+        input_shape = data.get_partial_shape();
+    }
+
+    std::vector<Output<ov::Node>> pool_outputs;
+    if (adaptive) {
+        auto pool_size = std::vector<int64_t>(3, 0);
+
+        if (kernel_shape.size() == 1) {
+            // Not tested: implemented according to spec, but can't generate real
+            // model to test
+            pool_size[0] = pool_size[1] = pool_size[2] = kernel_shape[0];
+        } else {
+            pool_size[0] = kernel_shape[0];
+            pool_size[1] = kernel_shape[1];
+            pool_size[2] = kernel_shape[2];
+        }
+
+        const Output<ov::Node> output_shape =
+            default_opset::Constant::create(ov::element::i64, {pool_size.size()}, pool_size);
+
+        pool_outputs =
+            std::make_shared<default_opset::AdaptiveMaxPool>(data, output_shape, ov::element::i32)->outputs();
+
+    } else {
+        auto strides = node.get_attribute<std::vector<int32_t>>("strides");
+
+        size_t kernel_d, kernel_h, kernel_w;
+        if (kernel_shape.size() == 1) {
+            // Not tested: implemented according to spec, but can't generate real
+            // model to test
+            kernel_d = kernel_h = kernel_w = kernel_shape[0];
+        } else {
+            kernel_d = kernel_shape[0];
+            kernel_h = kernel_shape[1];
+            kernel_w = kernel_shape[2];
+        }
+
+        PADDLE_OP_CHECK(node,
+                        kernel_d > 0 && kernel_h > 0 && kernel_w > 0,
+                        "pool3d kernel shape must be greater than 0");
+
+        // Note: this shape check is only valid when the spatial dim of input_shape
+        // is static.
+        if (input_shape[2].is_static() && input_shape[3].is_static() && input_shape[4].is_static()) {
+            uint64_t input_d = input_shape[input_rank - 3].get_length();
+            uint64_t input_h = input_shape[input_rank - 2].get_length();
+            uint64_t input_w = input_shape[input_rank - 1].get_length();
+            if ((input_d > 0) && (input_d + pad_begin[0] + pad_end[0] < kernel_d)) {
+                kernel_d = input_d + pad_begin[0] + pad_end[0];
+            }
+            if ((input_h > 0) && (input_h + pad_begin[1] + pad_end[1] < kernel_h)) {
+                kernel_h = input_h + pad_begin[1] + pad_end[1];
+            }
+            if ((input_w > 0) && (input_w + pad_begin[2] + pad_end[2] < kernel_w)) {
+                kernel_w = input_w + pad_begin[2] + pad_end[2];
+            }
+        }
+
+        pool_outputs = std::make_shared<default_opset::MaxPool>(data,
+                                                                ov::Strides(strides.begin(), strides.end()),
+                                                                ov::Strides{1, 1, 1},
+                                                                pad_begin,
+                                                                pad_end,
+                                                                ov::Shape{kernel_d, kernel_h, kernel_w},
+                                                                rounding_type,
+                                                                auto_pad,
+                                                                ov::element::i32,
+                                                                2)
+                           ->outputs();
+    }
+
+    if (data_format == "NDHWC") {
+        // Restore the caller's NDHWC layout on the pooled values.
+        pool_outputs[0] = std::make_shared<default_opset::Transpose>(
+            pool_outputs[0],
+            std::make_shared<default_opset::Constant>(ov::element::i64, Shape{5}, std::vector<int64_t>{0, 2, 3, 4, 1}));
+    }
+
+    return NamedOutputs{{"Out", {pool_outputs[0]}}, {"Mask", {pool_outputs[1]}}};
+}
+
+}  // namespace op
+}  // namespace paddle
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp
index 858e1beed4fe33..fa6a14b530e93f 100644
--- a/src/frontends/paddle/src/op_table.cpp
+++ b/src/frontends/paddle/src/op_table.cpp
@@ -79,6 +79,8 @@ OP_CONVERTER(p_norm);
 OP_CONVERTER(pad3d);
 OP_CONVERTER(pow);
 OP_CONVERTER(pool2d);
+OP_CONVERTER(pool3d);
+OP_CONVERTER(pool3d_with_index);
 OP_CONVERTER(prior_box);
 OP_CONVERTER(quantize_linear);
 OP_CONVERTER(range);
@@ -199,6 +201,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
         {"matmul", op::matmul},
         {"matmul_v2", op::matmul_v2},
         {"max_pool2d_with_index", op::pool2d},
+        {"max_pool3d_with_index", op::pool3d_with_index},
         {"matrix_nms", op::matrix_nms},
         {"meshgrid", op::meshgrid},
         {"multiclass_nms3", op::multiclass_nms},
@@ -210,6 +213,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
         {"pad3d", op::pad3d},
         {"pow", op::pow},
         {"pool2d", op::pool2d},
+        {"pool3d", op::pool3d},
         {"prior_box", op::prior_box},
         {"quantize_linear", op::quantize_linear},
         {"range", op::range},
diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp
index 907253bbf26afb..f0c865b53c79b2 100644
--- a/src/frontends/paddle/tests/op_fuzzy.cpp
+++ b/src/frontends/paddle/tests/op_fuzzy.cpp
@@ -34,6 +34,20 @@ static const std::vector<std::string> models{
     std::string("avgPool_test7"),
     std::string("avgPool_test8"),
     std::string("avgPool_test9"),
+    std::string("avgAdaptivePool3D_test1"),
+    std::string("avgAdaptivePool3D_test2"),
+    std::string("avgAdaptivePool3D_test3"),
+    std::string("avgAdaptivePool3D_test4"),
+    std::string("avg3dPool_test1"),
+    std::string("avg3dPool_test2"),
+    std::string("avg3dPool_test3"),
+    std::string("avg3dPool_test4"),
+    std::string("avg3dPool_test5"),
+    std::string("avg3dPool_test6"),
+    std::string("avg3dPool_test7"),
+    std::string("avg3dPool_test8"),
+    std::string("avg3dPool_test9"),
+    std::string("avg3dPool_test10"),
     std::string("batch_norm_nchw/batch_norm_nchw.pdmodel"),
     std::string("batch_norm_nhwc/batch_norm_nhwc.pdmodel"),
     std::string("bicubic_downsample_false_0/bicubic_downsample_false_0.pdmodel"),
@@ -330,6 +344,21 @@ static const std::vector<std::string> models{
     std::string("maxPool_test7"),
     std::string("maxPool_test8"),
     std::string("maxPool_test9"),
+    std::string("maxAdaptivePool3D_test1"),
+    std::string("maxAdaptivePool3D_test2"),
+    std::string("maxAdaptivePool3D_test3"),
+    std::string("maxAdaptivePool3D_test4"),
+    std::string("max3dPool_test1"),
+    std::string("max3dPool_test2"),
+    std::string("max3dPool_test3"),
+    std::string("max3dPool_test4"),
+    std::string("max3dPool_test5"),
+    std::string("max3dPool_test6"),
+    std::string("max3dPool_test7"),
+    std::string("max3dPool_test8"),
+    std::string("max3dPool_test9"),
+    std::string("max3dPool_test10"),
+    std::string("max3dRetureMask"),
     std::string("meshgrid/meshgrid.pdmodel"),
     std::string("multiclass_nms_by_background"),
     std::string("multiclass_nms_by_class_id"),
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool3d.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool3d.py
new file mode 100644
index 00000000000000..7d8c621f9ee2de
--- /dev/null
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool3d.py
@@ -0,0 +1,324 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+#
+# pool3d paddle model generator
+#
+import numpy as np
+import sys
+from save_model import saveModel
+
+data_type = "float32"
+
+
+def pool3d(name: str, x, attrs: dict):
+    # Build, run, and save a static-graph max/avg pool3d model.
+    import paddle
+
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        node_x = paddle.static.data(name="x", shape=x.shape, dtype=data_type)
+        if attrs["pool_type"] == "max":
+            out = paddle.nn.functional.max_pool3d(
+                node_x,
+                kernel_size=attrs["pool_size"],
+                stride=attrs["pool_stride"],
+                padding=attrs["pool_padding"],
+                ceil_mode=attrs["ceil_mode"],
+                data_format=attrs["data_format"],
+                return_mask=attrs["return_mask"],
+            )
+        else:
+            out = paddle.nn.functional.avg_pool3d(
+                node_x,
+                kernel_size=attrs["pool_size"],
+                stride=attrs["pool_stride"],
+                padding=attrs["pool_padding"],
+                ceil_mode=attrs["ceil_mode"],
+                data_format=attrs["data_format"],
+            )
+
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+        if attrs["return_mask"]:
+            outs = exe.run(feed={"x": x}, fetch_list=[out[0], out[1]])
+            saveModel(
+                name,
+                exe,
+                feedkeys=["x"],
+                fetchlist=[out[0], out[1]],
+                inputs=[x],
+                outputs=[outs[0], outs[1]],
+                target_dir=sys.argv[1],
+            )
+        else:
+            outs = exe.run(feed={"x": x}, fetch_list=[out])
+            saveModel(
+                name,
+                exe,
+                feedkeys=["x"],
+                fetchlist=[out],
+                inputs=[x],
+                outputs=[outs[0]],
+                target_dir=sys.argv[1],
+            )
+
+    return outs[0]
+
+
+def adaptive_pool3d(name: str, x, attrs: dict):
+    # Build, run, and save a static-graph adaptive max/avg pool3d model.
+    import paddle
+
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        node_x = paddle.static.data(name="x", shape=x.shape, dtype=data_type)
+        if attrs["pool_type"] == "max":
+            out = paddle.nn.functional.adaptive_max_pool3d(
+                x=node_x,
+                output_size=attrs["pool_size"],
+                return_mask=attrs["return_mask"],
+            )
+        else:
+            out = paddle.nn.functional.adaptive_avg_pool3d(
+                x=node_x, output_size=attrs["pool_size"]
+            )
+
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+        if attrs["return_mask"]:
+            outs = exe.run(feed={"x": x}, fetch_list=[out[0], out[1]])
+            saveModel(
+                name,
+                exe,
+                feedkeys=["x"],
+                fetchlist=[out[0], out[1]],
+                inputs=[x],
+                outputs=[outs[0], outs[1]],
+                target_dir=sys.argv[1],
+            )
+        else:
+            outs = exe.run(feed={"x": x}, fetch_list=[out])
+
+            saveModel(
+                name,
+                exe,
+                feedkeys=["x"],
+                fetchlist=[out],
+                inputs=[x],
+                outputs=[outs[0]],
+                target_dir=sys.argv[1],
+            )
+
+    return outs[0]
+
+
+def main():
+    N, C, D, H, W = 2, 3, 4, 4, 4
+    data = np.arange(N * C * D * H * W).astype(data_type)
+    data_NCDHW = data.reshape(N, C, D, H, W)
+    data_NDHWC = data.reshape(N, D, H, W, C)
+
+    pooling_types = ["max", "avg"]
+
+    for i, pooling_type in enumerate(pooling_types):
+        # example 1:
+        # ceil_mode = False
+        paddle_attrs = {
+            # input=data_NCDHW, # shape: [2, 3, 4, 4, 4]
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": [
+                1,
+                2,
+                1,
+            ],  # it is same as pool_padding = [1, 1, 2, 2, 1, 1]
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test1", data_NCDHW, paddle_attrs)
+
+        # example 2:
+        # ceil_mode = True (different from example 1)
+        paddle_attrs = {
+            # input=data_NCDHW,
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": [
+                [0, 0],
+                [0, 0],
+                [1, 1],
+                [2, 2],
+                [1, 1],
+            ],  # it is same as pool_padding = [1, 1, 2, 2, 1, 1]
+            "ceil_mode": True,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test2", data_NCDHW, paddle_attrs)
+
+        # example 3:
+        # pool_padding = "SAME" (different from example 1)
+        paddle_attrs = {
+            # input=data_NCDHW,
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": "SAME",
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test3", data_NCDHW, paddle_attrs)
+
+        # example 4:
+        # pool_padding = "VALID" (different from example 1)
+        paddle_attrs = {
+            # input=data_NCDHW,
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": "VALID",
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test4", data_NCDHW, paddle_attrs)
+
+        # example 5:
+        # data_format = "NDHWC" (different from example 1)
+        paddle_attrs = {
+            # input=data_NDHWC, # shape: [2, 4, 4, 4, 3]
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": [1, 2, 1],
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NDHWC",
+        }
+        # NOT support data_format = "NDHWC" now
+        pool3d(pooling_type + "3d" + "Pool_test5", data_NDHWC, paddle_attrs)
+
+        # example 6:
+        # pool_padding size is 1
+        paddle_attrs = {
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": 2,
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test6", data_NCDHW, paddle_attrs)
+
+        # input data for test7 and test8
+        N_data1, C_data1, D_data1, H_data1, W_data1 = 2, 3, 8, 8, 8
+        data1 = np.arange(N_data1 * C_data1 * D_data1 * H_data1 * W_data1).astype(
+            data_type
+        )
+        data1_NCDHW = data1.reshape(N_data1, C_data1, D_data1, H_data1, W_data1)
+        data1_NDHWC = data1.reshape(N_data1, D_data1, H_data1, W_data1, C_data1)
+        # example 7:
+        # pool_padding size is 6: [pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]
+        paddle_attrs = {
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": [1, 2, 1, 1, 2, 1],
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test7", data1_NCDHW, paddle_attrs)
+
+        # example 8:
+        paddle_attrs = {
+            "pool_size": [3, 3, 3],
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": [[0, 0], [0, 0], [1, 2], [2, 1], [2, 1]],
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test8", data1_NCDHW, paddle_attrs)
+
+        # example 9:
+        paddle_attrs = {
+            "pool_size": 9,
+            "pool_type": pooling_type,
+            "pool_stride": [3, 3, 3],
+            "pool_padding": [[0, 0], [0, 0], [2, 1], [1, 2], [1, 2]],
+            "ceil_mode": False,
+            "exclusive": True,
+            "return_mask": False,
+            "data_format": "NCDHW",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test9", data1_NCDHW, paddle_attrs)
+
+        # example 10:
+        paddle_attrs = {
+            "pool_size": 9,
+            "pool_type": pooling_type,
+            "pool_stride": 3,
+            "pool_padding": [[0, 0], [2, 2], [1, 2], [2, 2], [0, 0]],
+            "ceil_mode": False,
+            "return_mask": False,
+            "data_format": "NDHWC",
+        }
+        pool3d(pooling_type + "3d" + "Pool_test10", data1_NDHWC, paddle_attrs)
+
+    paddle_attrs = {
+        "pool_size": 9,
+        "pool_type": "max",
+        "pool_stride": 3,
+        "pool_padding": [3, 3, 3],
+        "ceil_mode": False,
+        "return_mask": True,
+        "data_format": "NCDHW",
+    }
+    pool3d("max3dRetureMask", data_NCDHW, paddle_attrs)
+
+    # adaptive_pool3
+    for i, pooling_type in enumerate(pooling_types):
+        paddle_attrs = {
+            "pool_size": [2, 2, 2],
+            "pool_type": pooling_type,
+            "return_mask": False,
+        }
+        adaptive_pool3d(pooling_type + "AdaptivePool3D_test1", data_NCDHW, paddle_attrs)
+        paddle_attrs = {"pool_size": 2, "pool_type": pooling_type, "return_mask": False}
+        adaptive_pool3d(pooling_type + "AdaptivePool3D_test2", data_NCDHW, paddle_attrs)
+        paddle_attrs = {
+            "pool_size": 1,  # global pooling case
+            "pool_type": pooling_type,
+            "return_mask": False,
+        }
+        adaptive_pool3d(pooling_type + "AdaptivePool3D_test3", data_NCDHW, paddle_attrs)
+        paddle_attrs = {
+            "pool_size": 1,  # global pooling case
+            "pool_type": pooling_type,
+            "return_mask": True,
+        }
+        adaptive_pool3d(pooling_type + "AdaptivePool3D_test4", data_NCDHW, paddle_attrs)
+
+
+if __name__ == "__main__":
+    main()