[NewIR] add_n and combine support selected rows (PaddlePaddle#56754)
* add_n and combine support selected rows
wanghuancoder authored and BeingGod committed Sep 9, 2023
1 parent 59a4728 commit 0ca6f53
Showing 9 changed files with 454 additions and 41 deletions.
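
The recurring change across the diffs below is a relaxed type check: wherever a verifier previously required paddle::dialect::DenseTensorType, it now also accepts paddle::dialect::SelectedRowsType, both for a plain operand or result and element-wise for an ir::VectorType operand. A minimal, self-contained C++ sketch of that check pattern, using illustrative stand-in classes rather than Paddle's actual types:

#include <iostream>
#include <memory>
#include <vector>

// Illustrative stand-ins for the dialect's type hierarchy (not Paddle's classes).
struct Type { virtual ~Type() = default; };
struct DenseTensorType : Type {};
struct SelectedRowsType : Type {};  // sparse row indices plus a dense value tensor

// Mimics ir::Type::isa<T>() using RTTI.
template <typename T>
bool isa(const Type &t) { return dynamic_cast<const T *>(&t) != nullptr; }

// Before this commit only DenseTensorType passed; afterwards either tensor
// type is accepted, applied element-wise to vector-typed operands.
bool VerifyAddNOperands(const std::vector<std::unique_ptr<Type>> &operands) {
  for (const auto &t : operands) {
    if (!isa<DenseTensorType>(*t) && !isa<SelectedRowsType>(*t)) {
      return false;
    }
  }
  return true;
}

int main() {
  std::vector<std::unique_ptr<Type>> operands;
  operands.push_back(std::make_unique<DenseTensorType>());
  operands.push_back(std::make_unique<SelectedRowsType>());
  std::cout << std::boolalpha << VerifyAddNOperands(operands) << "\n";  // prints: true
  return 0;
}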
2 changes: 1 addition & 1 deletion paddle/fluid/ir/dialect/op_generator/op_gen.py
@@ -172,7 +172,7 @@ class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{
'bool': 'ir::BoolAttribute',
}

_NO_NEED_GEN_OPS = {'add_n', 'split_grad'}
_NO_NEED_GEN_OPS = {'add_n', 'add_n_', 'add_n_with_kernel', 'split_grad'}


def to_phi_and_fluid_op_name(op_item):
5 changes: 4 additions & 1 deletion paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc
@@ -48,7 +48,10 @@ void PaddleDialect::initialize() {
#define GET_OP_LIST
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" // NOLINT
>();
RegisterOps<paddle::dialect::AddNOp, paddle::dialect::SplitGradOp>();
RegisterOps<paddle::dialect::AddNOp,
paddle::dialect::AddN_Op,
paddle::dialect::AddNWithKernelOp,
paddle::dialect::SplitGradOp>();

RegisterInterfaces<ParameterConvertInterface>();
}
270 changes: 267 additions & 3 deletions paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc
@@ -57,13 +57,18 @@ void AddNOp::Verify() {
"The size %d of inputs must be equal to 1.", input_size));
if (auto vec_type = (*this)->operand(0).type().dyn_cast<ir::VectorType>()) {
for (size_t i = 0; i < vec_type.size(); ++i) {
PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>(),
PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
} else {
PADDLE_ENFORCE(
(*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>(),
(*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>() ||
(*this)
->operand(0)
.type()
.isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
@@ -81,7 +86,8 @@ void AddNOp::Verify() {
phi::errors::PreconditionNotMet(
"The size %d of outputs must be equal to 1.", output_size));
PADDLE_ENFORCE(
(*this)->result(0).type().isa<paddle::dialect::DenseTensorType>(),
(*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
(*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th output."));
}
@@ -146,6 +152,262 @@ void AddNOp::InferMeta(phi::InferMetaContext *infer_meta) {
fn(infer_meta);
}

OpInfoTuple AddN_Op::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
paddle::dialect::OpInputInfo(
"inputs",
"ir::VectorType<paddle::dialect::DenseTensorType>",
false,
false,
false)};
std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
std::vector<paddle::dialect::OpOutputInfo> outputs = {
paddle::dialect::OpOutputInfo(
"out", "paddle::dialect::DenseTensorType", false, false)};
paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
"AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
return std::make_tuple(inputs, attributes, outputs, run_time_info, "add_n_");
}

void AddN_Op::Build(ir::Builder &builder,
ir::OperationArgument &argument,
ir::OpResult inputs_) {
VLOG(4) << "Builder construction inputs";
std::vector<ir::OpResult> argument_inputs = {inputs_};
argument.AddOperands(argument_inputs.begin(), argument_inputs.end());

VLOG(4) << "Builder construction attributes";

VLOG(4) << "Builder construction outputs";
ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
(void)inputs;
std::vector<phi::DenseTensor> vec_dense_inputs;
for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
vec_dense_inputs.push_back(phi::DenseTensor(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
phi::DenseTensorMeta(
paddle::dialect::TransToPhiDataType(
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
inputs[i]
.dyn_cast<paddle::dialect::DenseTensorType>()
.data_layout(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
}
std::vector<phi::MetaTensor> vec_meta_inputs;
for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
}

std::vector<const phi::MetaTensor *> meta_inputs;
for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
meta_inputs.push_back(&vec_meta_inputs[i]);
}
phi::DenseTensor dense_out;
phi::MetaTensor meta_out(&dense_out);

phi::AddNInferMeta(meta_inputs, &meta_out);

std::vector<ir::Type> argument_outputs;
ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
ir::IrContext::Instance(),
paddle::dialect::TransToIrDataType(dense_out.dtype()),
dense_out.dims(),
dense_out.layout(),
dense_out.lod(),
dense_out.offset());
argument_outputs.push_back(out_dense_tensor_type);
argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
}

void AddN_Op::Verify() {
VLOG(4) << "Start Verifying inputs, outputs and attributes for: AddN_Op.";
VLOG(4) << "Verifying inputs:";
{
auto input_size = num_operands();
PADDLE_ENFORCE_EQ(
input_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of inputs must be equal to 1.", input_size));
if (auto vec_type =
(*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
for (size_t i = 0; i < vec_type.size(); ++i) {
PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
} else {
PADDLE_ENFORCE((*this)->operand_source(0)
.type()
.isa<paddle::dialect::DenseTensorType>() ||
(*this)
->operand_source(0)
.type()
.isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
}
VLOG(4) << "Verifying attributes:";
{
    // Attributes num is 0; no need to check attribute types.
}
VLOG(4) << "Verifying outputs:";
{
auto output_size = num_results();
PADDLE_ENFORCE_EQ(
output_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of outputs must be equal to 1.", output_size));
PADDLE_ENFORCE(
(*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
(*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th output."));
}
VLOG(4) << "End Verifying for: AddN_Op.";
}

void AddN_Op::InferMeta(phi::InferMetaContext *infer_meta) {
auto fn = PD_INFER_META(phi::AddNInferMeta);
fn(infer_meta);
}

OpInfoTuple AddNWithKernelOp::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
paddle::dialect::OpInputInfo(
"inputs",
"ir::VectorType<paddle::dialect::DenseTensorType>",
false,
false,
false)};
std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
std::vector<paddle::dialect::OpOutputInfo> outputs = {
paddle::dialect::OpOutputInfo(
"out", "paddle::dialect::DenseTensorType", false, false)};
paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
"AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
return std::make_tuple(
inputs, attributes, outputs, run_time_info, "add_n_with_kernel");
}

void AddNWithKernelOp::Build(ir::Builder &builder,
ir::OperationArgument &argument,
ir::OpResult inputs_) {
VLOG(4) << "Builder construction inputs";
std::vector<ir::OpResult> argument_inputs = {inputs_};
argument.AddOperands(argument_inputs.begin(), argument_inputs.end());

VLOG(4) << "Builder construction attributes";

VLOG(4) << "Builder construction outputs";
ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
(void)inputs;
std::vector<phi::DenseTensor> vec_dense_inputs;
for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
vec_dense_inputs.push_back(phi::DenseTensor(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
phi::DenseTensorMeta(
paddle::dialect::TransToPhiDataType(
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
inputs[i]
.dyn_cast<paddle::dialect::DenseTensorType>()
.data_layout(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
}
std::vector<phi::MetaTensor> vec_meta_inputs;
for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
}

std::vector<const phi::MetaTensor *> meta_inputs;
for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
meta_inputs.push_back(&vec_meta_inputs[i]);
}
phi::DenseTensor dense_out;
phi::MetaTensor meta_out(&dense_out);

phi::AddNInferMeta(meta_inputs, &meta_out);

std::vector<ir::Type> argument_outputs;
ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
ir::IrContext::Instance(),
paddle::dialect::TransToIrDataType(dense_out.dtype()),
dense_out.dims(),
dense_out.layout(),
dense_out.lod(),
dense_out.offset());
argument_outputs.push_back(out_dense_tensor_type);
argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
}

void AddNWithKernelOp::Verify() {
VLOG(4) << "Start Verifying inputs, outputs and attributes for: "
"AddNWithKernelOp.";
VLOG(4) << "Verifying inputs:";
{
auto input_size = num_operands();
PADDLE_ENFORCE_EQ(
input_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of inputs must be equal to 1.", input_size));
if (auto vec_type =
(*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
for (size_t i = 0; i < vec_type.size(); ++i) {
PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
} else {
PADDLE_ENFORCE((*this)->operand_source(0)
.type()
.isa<paddle::dialect::DenseTensorType>() ||
(*this)
->operand_source(0)
.type()
.isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
}
VLOG(4) << "Verifying attributes:";
{
    // Attributes num is 0; no need to check attribute types.
}
VLOG(4) << "Verifying outputs:";
{
auto output_size = num_results();
PADDLE_ENFORCE_EQ(
output_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of outputs must be equal to 1.", output_size));
PADDLE_ENFORCE(
(*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
(*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th output."));
}
VLOG(4) << "End Verifying for: AddNWithKernelOp.";
}

void AddNWithKernelOp::InferMeta(phi::InferMetaContext *infer_meta) {
auto fn = PD_INFER_META(phi::AddNInferMeta);
fn(infer_meta);
}

const char *SplitGradOp::attributes_name[1] = {"axis"};

OpInfoTuple SplitGradOp::GetOpInfo() {
@@ -364,3 +626,5 @@ void SplitGradOp::InferMeta(phi::InferMetaContext *infer_meta) {

IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp)
44 changes: 44 additions & 0 deletions paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h
@@ -24,6 +24,7 @@ paddle::dialect::AddNOp, paddle::dialect::SplitGradOp
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h"
#include "paddle/ir/core/builder.h"
@@ -51,6 +52,47 @@ class AddNOp : public ir::Op<AddNOp, OpYamlInfoInterface, InferMetaInterface> {
static void InferMeta(phi::InferMetaContext *infer_meta);
};

class AddN_Op : public ir::Op<AddN_Op,
paddle::dialect::OpYamlInfoInterface,
paddle::dialect::InferMetaInterface,
paddle::dialect::InplaceTrait> {
public:
using Op::Op;
static const char *name() { return "pd.add_n_"; }
static constexpr const char **attributes_name = nullptr;
static constexpr uint32_t attributes_num = 0;
static OpInfoTuple GetOpInfo();
static void Build(ir::Builder &builder, // NOLINT
ir::OperationArgument &argument, // NOLINT
ir::OpResult inputs_);

void Verify();
ir::Value inputs() { return operand_source(0); }
ir::OpResult out() { return result(0); }

static void InferMeta(phi::InferMetaContext *infer_meta);
};

class AddNWithKernelOp : public ir::Op<AddNWithKernelOp,
paddle::dialect::OpYamlInfoInterface,
paddle::dialect::InferMetaInterface> {
public:
using Op::Op;
static const char *name() { return "pd.add_n_with_kernel"; }
static constexpr const char **attributes_name = nullptr;
static constexpr uint32_t attributes_num = 0;
static OpInfoTuple GetOpInfo();
static void Build(ir::Builder &builder, // NOLINT
ir::OperationArgument &argument, // NOLINT
ir::OpResult inputs_);

void Verify();
ir::Value inputs() { return operand_source(0); }
ir::OpResult out() { return result(0); }

static void InferMeta(phi::InferMetaContext *infer_meta);
};

class SplitGradOp : public ir::Op<SplitGradOp, OpYamlInfoInterface> {
public:
using Op::Op;
@@ -79,5 +121,7 @@ class SplitGradOp : public ir::Op<SplitGradOp, OpYamlInfoInterface> {

IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp)

#endif
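
For reference, a rough usage sketch of the ops declared above, assuming a Paddle build from around this commit; the FullOp and ir::CombineOp builder calls follow the NewIR test style of that period, so the exact headers and argument lists are assumptions and may differ in detail.

#include <vector>

#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h"
#include "paddle/ir/core/builder.h"
#include "paddle/ir/core/builtin_op.h"
#include "paddle/ir/core/ir_context.h"
#include "paddle/ir/core/program.h"

void BuildAddNExample() {
  ir::IrContext *ctx = ir::IrContext::Instance();
  ctx->GetOrRegisterDialect<paddle::dialect::PaddleDialect>();

  ir::Program program(ctx);
  ir::Builder builder(ctx, program.block());

  // Two dense constants; the FullOp builder arguments here are assumed from
  // the generated op builders used in NewIR unit tests of this period.
  auto x = builder.Build<paddle::dialect::FullOp>(
      std::vector<int64_t>{2, 2}, 1.0, phi::DataType::FLOAT32, phi::CPUPlace());
  auto y = builder.Build<paddle::dialect::FullOp>(
      std::vector<int64_t>{2, 2}, 2.0, phi::DataType::FLOAT32, phi::CPUPlace());

  // builtin.combine packs the two results into one ir::VectorType value,
  // which is the single operand AddNOp / AddN_Op / AddNWithKernelOp expect.
  auto combine = builder.Build<ir::CombineOp>(
      std::vector<ir::OpResult>{x.out(), y.out()});

  // The in-place variant declared above; AddNWithKernelOp is built the same way.
  auto add_n_ = builder.Build<paddle::dialect::AddN_Op>(combine.out());
  (void)add_n_;
}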