【PaddlePaddle Hackathon 4】add paddle flip op (#15828)
* add flip op

* fix

* fix signedness error

* update flip

* use Slice

* remove redundant codes

* unify flip and reverse

* fix bug

---------

Co-authored-by: cecilia peng <cecilia.peng@intel.com>
Asthestarsfalll and ceciliapeng2011 authored May 19, 2023
1 parent d52efb9 commit 36dbe95
Showing 6 changed files with 139 additions and 20 deletions.
17 changes: 17 additions & 0 deletions src/frontends/paddle/src/op/flip.cpp
@@ -0,0 +1,17 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "reverse_op.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs flip(const NodeContext& node) {
    return reverse_op(node);
}
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
22 changes: 2 additions & 20 deletions src/frontends/paddle/src/op/reverse.cpp
@@ -2,32 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "default_opset.hpp"
-#include "openvino/frontend/paddle/node_context.hpp"
-#include "openvino/opsets/opset1.hpp"
+#include "reverse_op.hpp"
 
 namespace ov {
 namespace frontend {
 namespace paddle {
 namespace op {
-
-using namespace default_opset;
-
 NamedOutputs reverse(const NodeContext& node) {
-    auto x = node.get_input("X");
-    // axis is a vector
-    auto axis = node.get_attribute<std::vector<int32_t>>("axis");
-    // try to keep the axis positive since reverse IR doesn't support negative axis.
-    const auto dims = static_cast<int32_t>(x.get_partial_shape().rank().get_length());
-    std::for_each(axis.begin(), axis.end(), [&dims](int32_t& value) {
-        if (value < 0) {
-            value += dims;
-        }
-    });
-
-    auto axis_node = std::make_shared<Constant>(ngraph::element::i32, Shape{axis.size()}, axis);
-    auto reverse_op = std::make_shared<ov::opset1::Reverse>(x, axis_node, ov::opset1::Reverse::Mode::INDEX);
-    return node.default_single_output_mapping({reverse_op}, {"Out"});
+    return reverse_op(node);
 }
 } // namespace op
 } // namespace paddle
38 changes: 38 additions & 0 deletions src/frontends/paddle/src/op/reverse_op.hpp
@@ -0,0 +1,38 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
namespace {
NamedOutputs reverse_op(const NodeContext& node) {
    const auto data_node = node.get_input("X");
    const auto axes = node.get_attribute<std::vector<int32_t>>("axis");
    auto axes_length = axes.size();
    const auto starts =
        default_opset::Constant::create(element::i32,
                                        {axes_length},
                                        std::vector<int32_t>(axes_length, std::numeric_limits<int32_t>::max()));
    const auto stops =
        default_opset::Constant::create(element::i32,
                                        {axes_length},
                                        std::vector<int32_t>(axes_length, std::numeric_limits<int32_t>::min()));
    const auto steps =
        default_opset::Constant::create(element::i32, {axes_length}, std::vector<int32_t>(axes_length, -1));
    const auto axes_node = default_opset::Constant::create(element::i32, {axes_length}, axes);

    return node.default_single_output_mapping(
        {std::make_shared<default_opset::Slice>(data_node, starts, stops, steps, axes_node)},
        {"Out"});
}
} // namespace
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
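The shared reverse_op conversion lowers both flip and reverse to a single Slice: for every axis listed in the "axis" attribute it slices from INT32_MAX down to INT32_MIN with step -1, which effectively walks that axis backwards. A minimal NumPy sketch of the same idea, for illustration only (NumPy and the helper name flip_via_reversed_slice are not part of the change):

# Illustration only: reverse the selected axes with a step of -1, mirroring the
# Slice(starts=INT32_MAX, stops=INT32_MIN, steps=-1) lowering in reverse_op above.
import numpy as np

def flip_via_reversed_slice(x, axes):
    axes = [axes] if np.isscalar(axes) else list(axes)
    index = [slice(None)] * x.ndim
    for axis in axes:
        index[axis] = slice(None, None, -1)  # omitted bounds + step -1 == "max down to min"
    return x[tuple(index)]

x = np.arange(24).reshape(2, 3, 4)
assert np.array_equal(flip_via_reversed_slice(x, [0, -1]), np.flip(x, axis=(0, -1)))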
2 changes: 2 additions & 0 deletions src/frontends/paddle/src/op_table.cpp
@@ -41,6 +41,7 @@ OP_CONVERTER(elementwise_sub);
OP_CONVERTER(embedding);
OP_CONVERTER(exp);
OP_CONVERTER(expand_v2);
OP_CONVERTER(flip);
OP_CONVERTER(fill_any_like);
OP_CONVERTER(fill_constant_batch_size_like);
OP_CONVERTER(fill_constant);
@@ -160,6 +161,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"fill_constant_batch_size_like", op::fill_constant_batch_size_like},
{"fill_constant", op::fill_constant},
{"flatten_contiguous_range", op::flatten_contiguous_range},
{"flip", op::flip},
{"floor", op::floor},
{"gather", op::gather},
{"gather_nd", op::gather_nd},
7 changes: 7 additions & 0 deletions src/frontends/paddle/tests/op_fuzzy.cpp
@@ -188,6 +188,13 @@ static const std::vector<std::string> models{
std::string("expand_v2_tensor_list"),
std::string("expand_v2_tensor_list2"),
std::string("exp_test_float32"),
std::string("flip_1"),
std::string("flip_2"),
std::string("flip_3"),
std::string("flip_4"),
std::string("flip_5"),
std::string("flip_dynamic_1"),
std::string("flip_dynamic_2"),
std::string("fill_any_like"),
std::string("fill_any_like_f16"),
std::string("fill_any_like_f32"),
@@ -0,0 +1,73 @@
#
# flip paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle
import sys


def flip(name: str, x, axis, is_dynamic=False):
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        if is_dynamic:
            data = paddle.static.data(name='x', shape=(-1, ) * len(x.shape), dtype=x.dtype)
        else:
            data = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.flip(data, axis=axis)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[
                  x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    data_type = 'int32'
    axis = [2, 3]
    x = np.random.randint(0, 5, (2, 3, 4, 5)).astype(data_type)
    flip("flip_1", x, axis)

    data_type = 'float32'
    axis = [-1, -3]
    # axis = [3, 1]
    x = np.random.randn(3, 2, 1, 5).astype(data_type)
    flip("flip_2", x, axis)

    data_type = 'float32'
    axis = [0, 1]
    x = np.random.randn(1, 1, 1, 1).astype(data_type)
    flip("flip_3", x, axis)

    data_type = 'int64'
    axis = 1
    x = np.random.randint(-1, 3, (5, 3, 1, 1)).astype(data_type)
    flip("flip_4", x, axis)

    data_type = 'float32'
    axis = -1
    x = np.random.randn(1).astype(data_type)
    flip("flip_5", x, axis)

    data_type = 'int64'
    axis = 3
    x = np.random.randint(-5, 5, (1, 1, 4, 1)).astype(data_type)
    flip("flip_dynamic_1", x, axis, True)

    data_type = 'float32'
    axis = [-1, -2]
    x = np.random.randn(1, 4, 1).astype(data_type)
    flip("flip_dynamic_2", x, axis, True)

if __name__ == "__main__":
    main()
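The generator takes the target directory for the exported models as its first command-line argument (forwarded to saveModel as target_dir). A hedged usage sketch follows; the script's file name is not shown in this view, and np.flip is used here only as an independent cross-check of the reference semantics, not as part of the change:

# Assumed invocation (the actual script name/path is not shown here):
#     python <this_generator>.py ./test_models
#
# Independent cross-check of the semantics exercised by "flip_1":
import numpy as np

x = np.random.randint(0, 5, (2, 3, 4, 5)).astype('int32')
reference = np.flip(x, axis=(2, 3))  # what paddle.flip(x, axis=[2, 3]) should yield
assert np.array_equal(reference, x[:, :, ::-1, ::-1])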
