[operator migration] Migrate unstack_op and nms_op #44424

Merged: 28 commits, Aug 1, 2022. Changes shown from 22 of the 28 commits.

Commits
3921b3f  update unstack_op (HexToString, Jul 13, 2022)
a8bf83f  update unstack_op (HexToString, Jul 13, 2022)
b9685d7  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (HexToString, Jul 13, 2022)
1ccb8f0  update unstack_op (HexToString, Jul 13, 2022)
e872d80  update unstack_op (HexToString, Jul 13, 2022)
9c9e750  fix unstack test (ShiningZhang, Jul 13, 2022)
95ba10a  update unstack (HexToString, Jul 13, 2022)
f5e8a0c  update with remote (HexToString, Jul 13, 2022)
5b4c814  Merge branch 'develop_ysl' of https://github.com/HexToString/Paddle i… (HexToString, Jul 13, 2022)
694d590  fix unstack_test.py (HexToString, Jul 14, 2022)
2fe8dcd  temp_save_change_nms_op (HexToString, Jul 14, 2022)
4132861  Merge branch 'develop_ysl' of https://github.com/HexToString/Paddle i… (HexToString, Jul 14, 2022)
840b5ea  add nms test (HexToString, Jul 14, 2022)
00e9eb0  update nms fix (HexToString, Jul 15, 2022)
e4cbf9d  update unstack_op (HexToString, Jul 15, 2022)
0e00ce6  Merge branch 'develop_ysl' of https://github.com/HexToString/Paddle i… (HexToString, Jul 15, 2022)
8154310  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (HexToString, Jul 18, 2022)
9135bf8  temp save change (HexToString, Jul 18, 2022)
a692a33  finish fix nms_op (HexToString, Jul 18, 2022)
de372d6  pass nms test (HexToString, Jul 20, 2022)
3717a30  fix CI (HexToString, Jul 21, 2022)
58cafb9  fix ops test (HexToString, Jul 22, 2022)
0939969  save change (HexToString, Jul 28, 2022)
481c4f3  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (HexToString, Jul 28, 2022)
4daaf39  fix code style (HexToString, Jul 28, 2022)
586e2f4  fix code style (HexToString, Jul 28, 2022)
b429e42  fix ci and codestyle (HexToString, Jul 29, 2022)
8c578bc  fix ci (HexToString, Jul 29, 2022)
2 changes: 1 addition & 1 deletion paddle/fluid/operators/detection/CMakeLists.txt
@@ -82,7 +82,7 @@ detection_library(sigmoid_focal_loss_op SRCS sigmoid_focal_loss_op.cc
sigmoid_focal_loss_op.cu)
detection_library(retinanet_detection_output_op SRCS
retinanet_detection_output_op.cc)
detection_library(nms_op SRCS nms_op.cc nms_op.cu)
detection_library(nms_op SRCS nms_op.cc)

if(WITH_GPU OR WITH_ROCM)
set(TMPDEPS memory)
77 changes: 10 additions & 67 deletions paddle/fluid/operators/detection/nms_op.cc
100644 → 100755
@@ -12,7 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/detection/nms_op.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/fluid/framework/infershape_utils.h"

#include <vector>

@@ -65,24 +69,6 @@ class NMSOpMaker : public framework::OpProtoAndCheckerMaker {
class NMSOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Boxes"), "Input", "Boxes", "NMS");
OP_INOUT_CHECK(
ctx->HasOutput("KeepBoxesIdxs"), "Output", "KeepBoxesIdxs", "NMS");

auto boxes_dim = ctx->GetInputDim("Boxes");
PADDLE_ENFORCE_EQ(boxes_dim.size(),
2,
platform::errors::InvalidArgument(
"The Input Boxes must be 2-dimention "
"whose shape must be [N, 4] "
"N is the number of boxes "
"in last dimension in format [x1, x2, y1, y2]. "));
auto num_boxes = boxes_dim[0];

ctx->SetOutputDim("KeepBoxesIdxs", {num_boxes});
}

protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
@@ -91,65 +77,22 @@ class NMSOp : public framework::OperatorWithKernel {
}
};

template <typename T>
static void NMS(const T* boxes_data,
int64_t* output_data,
float threshold,
int64_t num_boxes) {
auto num_masks = CeilDivide(num_boxes, 64);
std::vector<uint64_t> masks(num_masks, 0);

for (int64_t i = 0; i < num_boxes; ++i) {
if (masks[i / 64] & 1ULL << (i % 64)) continue;
T box_1[4];
for (int k = 0; k < 4; ++k) {
box_1[k] = boxes_data[i * 4 + k];
}
for (int64_t j = i + 1; j < num_boxes; ++j) {
if (masks[j / 64] & 1ULL << (j % 64)) continue;
T box_2[4];
for (int k = 0; k < 4; ++k) {
box_2[k] = boxes_data[j * 4 + k];
}
bool is_overlap = CalculateIoU<T>(box_1, box_2, threshold);
if (is_overlap) {
masks[j / 64] |= 1ULL << (j % 64);
}
}
}

int64_t output_data_idx = 0;
for (int64_t i = 0; i < num_boxes; ++i) {
if (masks[i / 64] & 1ULL << (i % 64)) continue;
output_data[output_data_idx++] = i;
}

for (; output_data_idx < num_boxes; ++output_data_idx) {
output_data[output_data_idx] = 0;
}
}

template <typename T>
class NMSKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* boxes = context.Input<Tensor>("Boxes");
Tensor* output = context.Output<Tensor>("KeepBoxesIdxs");
int64_t* output_data = output->mutable_data<int64_t>(context.GetPlace());
auto threshold = context.template Attr<float>("iou_threshold");
NMS<T>(boxes->data<T>(), output_data, threshold, boxes->dims()[0]);
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(nms,
NMSInferMetaFunctor,
PD_INFER_META(phi::NMSInferMeta));

REGISTER_OPERATOR(
nms,
ops::NMSOp,
ops::NMSOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(nms, ops::NMSKernel<float>, ops::NMSKernel<double>);
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
NMSInferMetaFunctor);
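
With REGISTER_OP_CPU_KERNEL gone, the CPU implementation lives in the phi kernel library instead (the `kernel : func : nms` entry added to legacy_api.yaml below binds to it). A minimal sketch of the migrated CPU kernel, assuming the greedy bitmask algorithm removed above is carried over unchanged; the file path, kernel signature, and the CalculateIoU helper follow the usual phi conventions and are assumptions here, not lines from this PR:

// Sketch of a phi CPU kernel for nms (assumed layout; the real file would
// be something like paddle/phi/kernels/cpu/nms_kernel.cc). Includes are
// abbreviated.
#include <cstdint>
#include <vector>

namespace phi {

template <typename T, typename Context>
void NMSKernel(const Context& dev_ctx,
               const DenseTensor& boxes,  // [N, 4] in [x1, x2, y1, y2] order
               float threshold,
               DenseTensor* output) {     // [N] int64 kept-box indices
  const int64_t num_boxes = boxes.dims()[0];
  const T* boxes_data = boxes.data<T>();
  int64_t* output_data = dev_ctx.template Alloc<int64_t>(output);

  // One bit per box; a set bit marks the box as suppressed.
  std::vector<uint64_t> masks((num_boxes + 63) / 64, 0);
  for (int64_t i = 0; i < num_boxes; ++i) {
    if (masks[i / 64] & (1ULL << (i % 64))) continue;
    for (int64_t j = i + 1; j < num_boxes; ++j) {
      if (masks[j / 64] & (1ULL << (j % 64))) continue;
      // CalculateIoU (moved out of nms_op.h along with the kernel) returns
      // true when the overlap of the two boxes exceeds the threshold.
      if (CalculateIoU<T>(boxes_data + i * 4, boxes_data + j * 4, threshold)) {
        masks[j / 64] |= 1ULL << (j % 64);
      }
    }
  }

  // Surviving indices first, then zero padding: the output always has one
  // slot per input box, matching what NMSInferMeta declares below.
  int64_t out_idx = 0;
  for (int64_t i = 0; i < num_boxes; ++i) {
    if (!(masks[i / 64] & (1ULL << (i % 64)))) output_data[out_idx++] = i;
  }
  for (; out_idx < num_boxes; ++out_idx) output_data[out_idx] = 0;
}

}  // namespace phi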
53 changes: 5 additions & 48 deletions paddle/fluid/operators/unstack_op.cc
100644 → 100755
@@ -21,6 +21,7 @@ limitations under the License. */
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/infermeta/backward.h"

namespace paddle {
namespace operators {
@@ -63,51 +64,6 @@ class UnStackGradOpMaker : public framework::SingleGradOpMaker<T> {
class UnStackGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_GT(ctx->Inputs(framework::GradVarName("Y")).size(),
0,
platform::errors::InvalidArgument(
"The Inputs(Y@Grad) of unstack operator are empty."));
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")),
"Output",
"X",
"UnStackGrad");
auto input_dims = ctx->GetInputsDim(framework::GradVarName("Y"));
for (size_t i = 1; i < input_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(
input_dims[i],
input_dims[0],
platform::errors::InvalidArgument(
"The dimensions of all Inputs(Y@Grad) must be the same,"
"but received Inputs(Y@Grad)'s %d-th dimension is %d, "
"Inputs(Y@Grad)'s 0-th to %d-th dimension is %d.",
i,
input_dims[i],
i - 1,
input_dims[0]));
}

int axis = ctx->Attrs().Get<int>("axis");
int rank = input_dims[0].size();
PADDLE_ENFORCE_GE(axis,
-(rank + 1),
platform::errors::InvalidArgument(
"The attribute axis is out of range, it must be "
"inside [-(rank+1), rank+1), where rank = %d",
rank));
PADDLE_ENFORCE_LT(axis,
rank + 1,
platform::errors::InvalidArgument(
"The attribute axis is out of range, it must be "
"inside [-(rank+1), rank+1), where rank = %d",
rank));
if (axis < 0) axis += (rank + 1);

auto vec = phi::vectorize<int>(input_dims[0]);
vec.insert(vec.begin() + axis, input_dims.size());
ctx->SetOutputDim(framework::GradVarName("X"), phi::make_ddim(vec));
}
};

} // namespace operators
@@ -119,12 +75,13 @@ namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(unstack,
UnStackInferMetaFunctor,
PD_INFER_META(phi::UnStackInferMeta));

DECLARE_INFER_SHAPE_FUNCTOR(unstack_grad,
UnStackGradInferMetaFunctor,
PD_INFER_META(phi::UnStackGradInferMeta));
REGISTER_OPERATOR(unstack,
ops::UnStackOp,
ops::UnStackOpMaker,
ops::UnStackGradOpMaker<paddle::framework::OpDesc>,
ops::UnStackGradOpMaker<paddle::imperative::OpBase>,
UnStackInferMetaFunctor);

REGISTER_OPERATOR(unstack_grad, ops::UnStackGradOp);
REGISTER_OPERATOR(unstack_grad, ops::UnStackGradOp, UnStackGradInferMetaFunctor);
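
Shape inference for unstack_grad now goes through DECLARE_INFER_SHAPE_FUNCTOR rather than the hand-written InferShape deleted above. Conceptually, the macro produces a functor that bridges fluid's InferShapeContext to the phi infermeta function; the following is a simplified sketch of that bridge, not the macro's actual expansion:

// Simplified sketch only. The real macro (declared in
// paddle/fluid/framework/infershape_utils.h) builds a compatibility
// InferMetaContext and dispatches via registered argument mappings rather
// than hand-written glue like this.
class UnStackGradInferShapeSketch : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext* ctx) const override {
    // 1. Wrap the Y@Grad inputs and the X@Grad output as phi MetaTensors.
    // 2. Read the "axis" attribute.
    // 3. Call phi::UnStackGradInferMeta(y_grad_metas, axis, &x_grad_meta),
    //    which now owns the checks UnStackGradOp::InferShape used to do
    //    (implemented in paddle/phi/infermeta/backward.cc below).
  }
};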
19 changes: 19 additions & 0 deletions paddle/phi/api/yaml/legacy_api.yaml
100644 → 100755
@@ -1483,6 +1483,15 @@
optional : weight
backward : nll_loss_grad

- api : nms
args : (Tensor x, float threshold)
output : Tensor(out)
infer_meta :
func : NMSInferMeta
kernel :
func : nms
data_type : x

- api : norm
args : (Tensor x, int axis, float epsilon, bool is_test)
output : Tensor(out), Tensor(norm)
@@ -2214,6 +2223,16 @@
func : unique
data_type : x

# unstack
- api : unstack
args : (Tensor x, int axis, int num)
output : Tensor[]{num}
infer_meta :
func : UnStackInferMeta
kernel :
func : unstack
backward : unstack_grad

- api : unique_consecutive
args : (Tensor x, bool return_inverse, bool return_counts, int[] axis, int dtype)
output : Tensor(out), Tensor(index), Tensor(counts)
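
Each api entry in this yaml drives code generation for a C++ API function (with the dygraph Python binding layered on top). A hedged sketch of the declarations these two entries would generate, following how other legacy_api.yaml entries are emitted; the header path and exact spellings are assumptions, not files touched by this PR:

// Assumed shape of the generated declarations (normally emitted into
// paddle/phi/api/include/api.h by the yaml code generator).
namespace paddle {
namespace experimental {

// From the nms entry: one input tensor plus a float attribute; "data_type :
// x" means the kernel is selected by the dtype of x.
Tensor nms(const Tensor& x, float threshold);

// From the unstack entry: "output : Tensor[]{num}" becomes a vector whose
// length equals the num attribute.
std::vector<Tensor> unstack(const Tensor& x, int axis, int num);

}  // namespace experimental
}  // namespace paddle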
10 changes: 10 additions & 0 deletions paddle/phi/api/yaml/legacy_backward.yaml
100644 → 100755
@@ -2211,6 +2211,16 @@
func : unfold_grad
no_need_buffer : x

- backward_api : unstack_grad
forward : unstack (Tensor x, int axis, int num) -> Tensor[](out)
args : (Tensor[] out_grad, int axis)
output : Tensor(x_grad)
infer_meta :
func : UnStackGradInferMeta
param : [out_grad, axis]
kernel :
func : unstack_grad

- backward_api : unsqueeze_double_grad
forward : unsqueeze_grad(Tensor xshape, Tensor grad_out, IntArray axes) -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray axes)
42 changes: 42 additions & 0 deletions paddle/phi/infermeta/backward.cc
100644 → 100755
@@ -678,4 +678,46 @@ void StackGradInferMeta(const MetaTensor& out_grad,
}
}

void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
int axis,
MetaTensor* x_grad) {
std::vector<phi::DDim> input_dims(out_grad.size());
for(size_t i = 0; i < out_grad.size(); ++i){
input_dims[i] = out_grad[i]->dims();
}
for (size_t i = 1; i < input_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(
input_dims[i],
input_dims[0],
phi::errors::InvalidArgument(
"The dimensions of all Inputs(Y@Grad) must be the same,"
"but received Inputs(Y@Grad)'s %d-th dimension is %d, "
"Inputs(Y@Grad)'s 0-th to %d-th dimension is %d.",
i,
input_dims[i],
i - 1,
input_dims[0]));
}

int rank = input_dims[0].size();
PADDLE_ENFORCE_GE(axis,
-(rank + 1),
phi::errors::InvalidArgument(
"The attribute axis is out of range, it must be "
"inside [-(rank+1), rank+1), where rank = %d",
rank));
PADDLE_ENFORCE_LT(axis,
rank + 1,
phi::errors::InvalidArgument(
"The attribute axis is out of range, it must be "
"inside [-(rank+1), rank+1), where rank = %d",
rank));
if (axis < 0) axis += (rank + 1);

auto vec = phi::vectorize<int>(input_dims[0]);
vec.insert(vec.begin() + axis, input_dims.size());
x_grad->set_dims(phi::make_ddim(vec));
x_grad->set_dtype(out_grad[0]->dtype());
}

} // namespace phi
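
The shape rule is exactly the one deleted from UnStackGradOp::InferShape: take the dims shared by the Y@Grad tensors and insert their count at axis. A self-contained example with illustrative values:

#include <cassert>
#include <vector>

int main() {
  // Three Y@Grad tensors, each of shape [2, 4], unstacked at axis = 1.
  std::vector<int> vec = {2, 4};  // dims shared by every out_grad tensor
  int axis = 1;                   // already shifted to be non-negative
  int num_grads = 3;              // input_dims.size() in the code above
  vec.insert(vec.begin() + axis, num_grads);
  assert((vec == std::vector<int>{2, 3, 4}));  // X@Grad gets dims [2, 3, 4]
  return 0;
}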
4 changes: 4 additions & 0 deletions paddle/phi/infermeta/backward.h
100644 → 100755
@@ -267,4 +267,8 @@ void StackGradInferMeta(const MetaTensor& out_grad,
int axis,
std::vector<MetaTensor*> x_grad);

void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
int axis,
MetaTensor* x_grad);

} // namespace phi
15 changes: 15 additions & 0 deletions paddle/phi/infermeta/unary.cc
100644 → 100755
@@ -1345,6 +1345,21 @@ void NanmedianInferMeta(const MetaTensor& x,
out->set_dims(make_ddim(out_dim));
}

void NMSInferMeta(const MetaTensor& x,
float threshold,
MetaTensor* out){
auto boxes_dim = x.dims();
PADDLE_ENFORCE_EQ(boxes_dim.size(),
2,
phi::errors::InvalidArgument(
"The Input Boxes must be 2-dimention "
"whose shape must be [N, 4] "
"N is the number of boxes "
"in last dimension in format [x1, x2, y1, y2]. "));
auto num_boxes = boxes_dim[0];
out->set_dims(phi::make_ddim({num_boxes}));
}

void NormInferMeta(const MetaTensor& x,
int axis,
float epsilon,
4 changes: 4 additions & 0 deletions paddle/phi/infermeta/unary.h
100644 → 100755
@@ -190,6 +190,10 @@ void NanmedianInferMeta(const MetaTensor& x,
MetaTensor* out,
MetaTensor* median_index);

void NMSInferMeta(const MetaTensor& x,
float threshold,
MetaTensor* out);

void NormInferMeta(const MetaTensor& x,
int axis,
float epsilon,