From 36dbe95d9e2da83ac3afad2d31719645ac999fd9 Mon Sep 17 00:00:00 2001
From: Asthestarsfalll <72954905+Asthestarsfalll@users.noreply.github.com>
Date: Fri, 19 May 2023 17:30:27 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=90PaddlePaddle=20Hackathon=204=E3=80=91a?=
 =?UTF-8?q?dd=20paddle=20flip=20op=20(#15828)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add flip op

* fix

* fix signedness error

* update flip

* use Slice

* remove redundant codes

* unify flip and reverse

* fix bug

---------

Co-authored-by: cecilia peng
---
 src/frontends/paddle/src/op/flip.cpp          | 17 +++++
 src/frontends/paddle/src/op/reverse.cpp       | 22 +-----
 src/frontends/paddle/src/op/reverse_op.hpp    | 38 ++++++++++
 src/frontends/paddle/src/op_table.cpp         |  2 +
 src/frontends/paddle/tests/op_fuzzy.cpp       |  7 ++
 .../test_models/gen_scripts/generate_flip.py  | 73 +++++++++++++++++++
 6 files changed, 139 insertions(+), 20 deletions(-)
 create mode 100644 src/frontends/paddle/src/op/flip.cpp
 create mode 100644 src/frontends/paddle/src/op/reverse_op.hpp
 create mode 100644 src/frontends/paddle/tests/test_models/gen_scripts/generate_flip.py

diff --git a/src/frontends/paddle/src/op/flip.cpp b/src/frontends/paddle/src/op/flip.cpp
new file mode 100644
index 00000000000000..804edcc7e015d7
--- /dev/null
+++ b/src/frontends/paddle/src/op/flip.cpp
@@ -0,0 +1,17 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "reverse_op.hpp"
+
+namespace ov {
+namespace frontend {
+namespace paddle {
+namespace op {
+NamedOutputs flip(const NodeContext& node) {
+    return reverse_op(node);
+}
+}  // namespace op
+}  // namespace paddle
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/paddle/src/op/reverse.cpp b/src/frontends/paddle/src/op/reverse.cpp
index b5e87f4c5b9fca..097e13b401986b 100644
--- a/src/frontends/paddle/src/op/reverse.cpp
+++ b/src/frontends/paddle/src/op/reverse.cpp
@@ -2,32 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "default_opset.hpp"
-#include "openvino/frontend/paddle/node_context.hpp"
-#include "openvino/opsets/opset1.hpp"
+#include "reverse_op.hpp"
 
 namespace ov {
 namespace frontend {
 namespace paddle {
 namespace op {
-
-using namespace default_opset;
-
 NamedOutputs reverse(const NodeContext& node) {
-    auto x = node.get_input("X");
-    // axis is a vector
-    auto axis = node.get_attribute<std::vector<int32_t>>("axis");
-    // try to keep the axis positive since reverse IR doesn't support negative axis.
-    const auto dims = static_cast<int32_t>(x.get_partial_shape().rank().get_length());
-    std::for_each(axis.begin(), axis.end(), [&dims](int32_t& value) {
-        if (value < 0) {
-            value += dims;
-        }
-    });
-
-    auto axis_node = std::make_shared<Constant>(ngraph::element::i32, Shape{axis.size()}, axis);
-    auto reverse_op = std::make_shared<ov::opset1::Reverse>(x, axis_node, ov::opset1::Reverse::Mode::INDEX);
-    return node.default_single_output_mapping({reverse_op}, {"Out"});
+    return reverse_op(node);
 }
 }  // namespace op
 }  // namespace paddle
diff --git a/src/frontends/paddle/src/op/reverse_op.hpp b/src/frontends/paddle/src/op/reverse_op.hpp
new file mode 100644
index 00000000000000..eec1afb1dd9986
--- /dev/null
+++ b/src/frontends/paddle/src/op/reverse_op.hpp
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include "default_opset.hpp"
+#include "openvino/frontend/paddle/node_context.hpp"
+
+namespace ov {
+namespace frontend {
+namespace paddle {
+namespace op {
+namespace {
+NamedOutputs reverse_op(const NodeContext& node) {
+    const auto data_node = node.get_input("X");
+    const auto axes = node.get_attribute<std::vector<int32_t>>("axis");
+    auto axes_length = axes.size();
+    const auto starts =
+        default_opset::Constant::create(element::i32,
+                                        {axes_length},
+                                        std::vector<int32_t>(axes_length, std::numeric_limits<int32_t>::max()));
+    const auto stops =
+        default_opset::Constant::create(element::i32,
+                                        {axes_length},
+                                        std::vector<int32_t>(axes_length, std::numeric_limits<int32_t>::min()));
+    const auto steps =
+        default_opset::Constant::create(element::i32, {axes_length}, std::vector<int32_t>(axes_length, -1));
+    const auto axes_node = default_opset::Constant::create(element::i32, {axes_length}, axes);
+
+    return node.default_single_output_mapping(
+        {std::make_shared<default_opset::Slice>(data_node, starts, stops, steps, axes_node)},
+        {"Out"});
+}
+}  // namespace
+}  // namespace op
+}  // namespace paddle
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp
index c23bc1381068dd..835933c8a0640e 100644
--- a/src/frontends/paddle/src/op_table.cpp
+++ b/src/frontends/paddle/src/op_table.cpp
@@ -41,6 +41,7 @@ OP_CONVERTER(elementwise_sub);
 OP_CONVERTER(embedding);
 OP_CONVERTER(exp);
 OP_CONVERTER(expand_v2);
+OP_CONVERTER(flip);
 OP_CONVERTER(fill_any_like);
 OP_CONVERTER(fill_constant_batch_size_like);
 OP_CONVERTER(fill_constant);
@@ -160,6 +161,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
             {"fill_constant_batch_size_like", op::fill_constant_batch_size_like},
             {"fill_constant", op::fill_constant},
             {"flatten_contiguous_range", op::flatten_contiguous_range},
+            {"flip", op::flip},
             {"floor", op::floor},
             {"gather", op::gather},
             {"gather_nd", op::gather_nd},
diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp
index c6db82b7a6b5d8..55536297b54235 100644
--- a/src/frontends/paddle/tests/op_fuzzy.cpp
+++ b/src/frontends/paddle/tests/op_fuzzy.cpp
@@ -188,6 +188,13 @@ static const std::vector<std::string> models{
     std::string("expand_v2_tensor_list"),
     std::string("expand_v2_tensor_list2"),
     std::string("exp_test_float32"),
+    std::string("flip_1"),
+    std::string("flip_2"),
+    std::string("flip_3"),
+    std::string("flip_4"),
+    std::string("flip_5"),
+    std::string("flip_dynamic_1"),
+    std::string("flip_dynamic_2"),
     std::string("fill_any_like"),
     std::string("fill_any_like_f16"),
     std::string("fill_any_like_f32"),
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_flip.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_flip.py
new file mode 100644
index 00000000000000..5c91c0bf5f3603
--- /dev/null
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_flip.py
@@ -0,0 +1,73 @@
+#
+# flip paddle model generator
+#
+import numpy as np
+from save_model import saveModel
+import paddle
+import sys
+
+
+def flip(name: str, x, axis, is_dynamic=False):
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        if is_dynamic:
+            data = paddle.static.data(name='x', shape=(-1, ) * len(x.shape), dtype=x.dtype)
+        else:
+            data = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
+        out = paddle.flip(data, axis=axis)
+
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+
+        outs = exe.run(
+            feed={'x': x},
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[
+                  x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+
+def main():
+    data_type = 'int32'
+    axis = [2, 3]
+    x = np.random.randint(0, 5, (2, 3, 4, 5)).astype(data_type)
+    flip("flip_1", x, axis)
+
+    data_type = 'float32'
+    axis = [-1, -3]
+    # axis = [3, 1]
+    x = np.random.randn(3, 2, 1, 5).astype(data_type)
+    flip("flip_2", x, axis)
+
+    data_type = 'float32'
+    axis = [0, 1]
+    x = np.random.randn(1, 1, 1, 1).astype(data_type)
+    flip("flip_3", x, axis)
+
+    data_type = 'int64'
+    axis = 1
+    x = np.random.randint(-1, 3, (5, 3, 1, 1)).astype(data_type)
+    flip("flip_4", x, axis)
+
+    data_type = 'float32'
+    axis = -1
+    x = np.random.randn(1).astype(data_type)
+    flip("flip_5", x, axis)
+
+    data_type = 'int64'
+    axis = 3
+    x = np.random.randint(-5, 5, (1, 1, 4, 1)).astype(data_type)
+    flip("flip_dynamic_1", x, axis, True)
+
+    data_type = 'float32'
+    axis = [-1, -2]
+    x = np.random.randn(1, 4, 1).astype(data_type)
+    flip("flip_dynamic_2", x, axis, True)
+
+if __name__ == "__main__":
+    main()
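
Note (editorial, not part of the patch): the shared reverse_op() converter lowers both paddle flip and the legacy reverse op to a single Slice node that steps through each requested axis with step -1, using INT32 max/min as start/stop sentinels so the full extent of every axis is covered and negative axes are handled by Slice itself, which is why the old axis-normalization code in reverse.cpp could be dropped. The Python sketch below is only an illustration of that slicing idea under these assumptions; the helper name reverse_by_slicing and the NumPy comparison are hypothetical, not code from this patch.

import numpy as np

def reverse_by_slicing(x, axes):
    # Hypothetical helper mirroring the Slice-based lowering: reverse each
    # requested axis over its full extent with a step of -1.
    axes = [axes] if np.isscalar(axes) else list(axes)
    slices = [slice(None)] * x.ndim
    for axis in axes:
        # Negative axes index from the end, as paddle.flip allows.
        slices[axis] = slice(None, None, -1)
    return x[tuple(slices)]

# Sanity check against NumPy's flip on axes similar to the fuzzy tests above.
x = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
assert np.array_equal(reverse_by_slicing(x, [2, 3]), np.flip(x, axis=[2, 3]))
assert np.array_equal(reverse_by_slicing(x, [-1, -3]), np.flip(x, axis=[-1, -3]))

Expressing the reversal this way keeps the converted graph in the default opset and avoids the explicit negative-axis normalization the previous opset1::Reverse-based conversion required.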