From 0b7cd676a605eab9142105000aad107ad041a9f2 Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Tue, 29 Aug 2023 03:14:32 +0000
Subject: [PATCH 01/10] add_n and combine support selected rows

---
 .../ir/phi_kernel_adaptor/phi_kernel_util.h   |  28 +++-
 .../ir/transforms/pd_op_to_kernel_pass.cc     | 151 ++++++++++++++----
 2 files changed, 143 insertions(+), 36 deletions(-)

diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
index 1958a9444bcb9..b1916d5418f77 100644
--- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
+++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
@@ -118,8 +118,18 @@ void BuildPhiContext(ir::Operation* op,
       InListType inputs;
       auto& variable_array = var->Get<VariableRefArray>();
       for (size_t i = 0; i < variable_array.size(); ++i) {
-        inputs.emplace_back(InType(const_cast<phi::DenseTensor*>(
-            &(variable_array[i]->Get<phi::DenseTensor>()))));
+        if (variable_array[i]->IsType<phi::DenseTensor>()) {
+          inputs.emplace_back(InType(const_cast<phi::DenseTensor*>(
+              &(variable_array[i]->Get<phi::DenseTensor>()))));
+        } else if (variable_array[i]->IsType<phi::SelectedRows>()) {
+          inputs.emplace_back(InType(const_cast<phi::SelectedRows*>(
+              &(variable_array[i]->Get<phi::SelectedRows>()))));
+        } else {
+          PADDLE_THROW(phi::errors::Unimplemented(
+              "Only support Vector<DenseTensor> and vector<SelectedRows> now, "
+              "not support vector<%d>.",
+              variable_array[i]->Type()));
+        }
       }
       ctx->EmplaceBackInputs(inputs);
     } else {
@@ -315,8 +325,18 @@ void BuildPhiContext(ir::Operation* op,
       auto& variable_array = inner_scope->FindVar(name_map.at(out_ptr))
                                  ->Get<VariableRefArray>();
       for (size_t i = 0; i < variable_array.size(); ++i) {
-        outputs.emplace_back(OutType(const_cast<phi::DenseTensor*>(
-            &(variable_array[i]->Get<phi::DenseTensor>()))));
+        if (variable_array[i]->IsType<phi::DenseTensor>()) {
+          outputs.emplace_back(OutType(const_cast<phi::DenseTensor*>(
+              &(variable_array[i]->Get<phi::DenseTensor>()))));
+        } else if (variable_array[i]->IsType<phi::SelectedRows>()) {
+          outputs.emplace_back(OutType(const_cast<phi::SelectedRows*>(
+              &(variable_array[i]->Get<phi::SelectedRows>()))));
+        } else {
+          PADDLE_THROW(phi::errors::Unimplemented(
+              "Only support Vector<DenseTensor> and vector<SelectedRows> now, "
+              "not support vector<%d>.",
+              variable_array[i]->Type()));
+        }
       }
       ctx->EmplaceBackOutputs(outputs);
     } else {
diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
index 4198098f2bd4f..f40d59b19d1ef 100644
--- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
+++ b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
@@ -126,34 +126,67 @@ bool SkipFeedOp(ir::Operation* op, const std::set<std::string>& feed_names) {
       op->attributes().at("name").dyn_cast<ir::StrAttribute>().AsString());
 }
 
-std::vector<phi::DenseTensor> GetFakeTensorList(ir::Value new_input_tmp) {
-  std::vector<phi::DenseTensor> vec_res;
+std::vector<std::shared_ptr<phi::TensorBase>> GetFakeTensorList(
+    ir::Value new_input_tmp) {
+  std::vector<std::shared_ptr<phi::TensorBase>> vec_res;
   auto input_type = new_input_tmp.type();
-  std::vector<dialect::AllocatedDenseTensorType> types;
-  if (input_type.isa<dialect::AllocatedDenseTensorType>()) {
-    types.push_back(input_type.dyn_cast<dialect::AllocatedDenseTensorType>());
-  } else if (input_type.isa<ir::VectorType>()) {
-    auto vec_inner_types = input_type.dyn_cast<ir::VectorType>().data();
-    for (size_t i = 0; i < vec_inner_types.size(); ++i) {
-      types.push_back(
-          vec_inner_types[0].dyn_cast<dialect::AllocatedDenseTensorType>());
-    }
-  }
-  for (auto& type : types) {
-    auto ptr = new phi::Allocation(nullptr, 0, type.place());
+  auto build_fake_dense_tensor =
+      [](const dialect::AllocatedDenseTensorType& type) {
+        auto ptr = new phi::Allocation(nullptr, 0, type.place());
+
+        std::shared_ptr<phi::Allocation> holder(ptr);
+
+        auto dtype = TransToPhiDataType(type.dtype());
+
+        phi::DenseTensorMeta meta(
+            dtype, type.dims(), type.data_layout(), type.lod(), type.offset());
+
+        return std::make_shared<phi::DenseTensor>(holder, meta);
+      };
+
+  auto build_fake_selected_rows =
+      [](const dialect::AllocatedSelectedRowsType& type) {
+        auto ptr = new phi::Allocation(nullptr, 0, type.place());
-    std::shared_ptr<phi::Allocation> holder(ptr);
+        std::shared_ptr<phi::Allocation> holder(ptr);
 
-    auto dtype = TransToPhiDataType(type.dtype());
+        auto dtype = TransToPhiDataType(type.dtype());
 
-    phi::DenseTensorMeta meta(
-        dtype, type.dims(), type.data_layout(), type.lod(), type.offset());
+        phi::DenseTensorMeta meta(
+            dtype, type.dims(), type.data_layout(), type.lod(), type.offset());
 
-    phi::DenseTensor fake_tensor(holder, meta);
+        std::vector<int64_t> rows;
+        int64_t height = 0;
+        rows.clear();
 
-    vec_res.push_back(fake_tensor);
+        auto sr = std::make_shared<phi::SelectedRows>(rows, height);
+
+        phi::DenseTensor dense_tensor(holder, meta);
+        *(sr->mutable_value()) = dense_tensor;
+
+        return sr;
+      };
+
+  if (input_type.isa<dialect::AllocatedDenseTensorType>()) {
+    vec_res.push_back(build_fake_dense_tensor(
+        input_type.dyn_cast<dialect::AllocatedDenseTensorType>()));
+  } else if (input_type.isa<dialect::AllocatedSelectedRowsType>()) {
+    vec_res.push_back(build_fake_selected_rows(
+        input_type.dyn_cast<dialect::AllocatedSelectedRowsType>()));
+  } else if (input_type.isa<ir::VectorType>()) {
+    auto vec_inner_types = input_type.dyn_cast<ir::VectorType>().data();
+    for (size_t i = 0; i < vec_inner_types.size(); ++i) {
+      if (vec_inner_types[i].isa<dialect::AllocatedDenseTensorType>()) {
+        vec_res.push_back(build_fake_dense_tensor(
+            vec_inner_types[i].dyn_cast<dialect::AllocatedDenseTensorType>()));
+      } else if (vec_inner_types[i].isa<dialect::AllocatedSelectedRowsType>()) {
+        vec_res.push_back(build_fake_selected_rows(
+            vec_inner_types[i].dyn_cast<dialect::AllocatedSelectedRowsType>()));
+      }
+    }
   }
+
   return vec_res;
 }
 
@@ -443,7 +476,7 @@ phi::KernelKey GetKernelKey(
         auto fake_tensors = GetFakeTensorList(new_input_tmp);
         for (auto& fake_tensor : fake_tensors) {
-          kernel_key_parser.AssignKernelKeySet(fake_tensor);
+          kernel_key_parser.AssignKernelKeySet(*fake_tensor);
         }
 
         // Because we can't make sure the place when build data op
@@ -540,6 +573,12 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
                 new_in.type()
                     .dyn_cast<dialect::AllocatedDenseTensorType>()
                     .place());
+          } else if (new_in.type()
+                         .isa<dialect::AllocatedSelectedRowsType>()) {
+            out_places.push_back(
+                new_in.type()
+                    .dyn_cast<dialect::AllocatedSelectedRowsType>()
+                    .place());
           } else {
             PADDLE_THROW(phi::errors::Unimplemented(
                 "only support dense tensor type for now"));
@@ -566,6 +605,13 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
                       out_places[idx],
                       base_type.dyn_cast<dialect::DenseTensorType>());
               vec_inner_types.push_back(allocated_dense_tensor_dtype);
+            } else if (base_type.isa<dialect::SelectedRowsType>()) {
+              auto allocated_dense_tensor_dtype =
+                  paddle::dialect::AllocatedSelectedRowsType::get(
+                      ctx,
+                      out_places[idx],
+                      base_type.dyn_cast<dialect::SelectedRowsType>());
+              vec_inner_types.push_back(allocated_dense_tensor_dtype);
             } else {
               PADDLE_THROW(phi::errors::Unimplemented(
                   "only support dense tensor in vector type for now"));
@@ -587,7 +633,8 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
       } else {
         PADDLE_THROW(phi::errors::Unimplemented(
             "builtin.combine Result type only support "
-            "VectorType<DenseTensorType>"));
+            "VectorType<DenseTensorType> and "
+            "VectorType<SelectedRowsType>"));
       }
     }
   }
@@ -765,6 +812,14 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
     if (op_info_parser != nullptr) {
       kernel_fn_str = op_info_parser->OpRuntimeInfo().kernel_func[0];
     }
+
+    if (op_item->name() == "pd.add_n_" ||
+        op_item->name() == "pd.add_n_with_kernel") {
+      if (op_item->result(0).type().isa<dialect::SelectedRowsType>()) {
+        kernel_fn_str = "add_n_sr";
+      }
+    }
+
     auto kernel_key =
         GetKernelKey(op_item, place, map_value_pair, op_info_parser.get());
     VLOG(6) << "kernel type " << kernel_key;
@@ -939,9 +994,22 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
           for (size_t j = 0; j < pre_define_op->num_operands(); ++j) {
             auto in_i = map_value_pair.at(pre_define_op->operand_source(j));
             auto in_i_type = in_i.type();
-            auto place =
-                in_i_type.dyn_cast<dialect::AllocatedDenseTensorType>()
-                    .place();
+            phi::Place place;
+            if (in_i_type.isa<dialect::AllocatedDenseTensorType>()) {
+              place =
+                  in_i_type.dyn_cast<dialect::AllocatedDenseTensorType>()
+                      .place();
+            } else if (in_i_type
+                           .isa<dialect::AllocatedSelectedRowsType>()) {
+              place =
+                  in_i_type.dyn_cast<dialect::AllocatedSelectedRowsType>()
+                      .place();
+            } else {
+              PADDLE_THROW(phi::errors::Unimplemented(
+                  "builtin.combine Input type only support "
+                  "VectorType<DenseTensorType> and "
+                  "VectorType<SelectedRowsType>"));
+            }
 
             // get input args def type
             auto args_def = kernel.args_def();
@@ -959,12 +1027,30 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
               // build memcopy op
               auto out_place = phi::TransToPhiPlace(kernel.InputAt(i).backend);
-              auto out_type = dialect::AllocatedDenseTensorType::get(
-                  ctx,
-                  out_place,
-                  pre_define_op->operand_source(j)
-                      .type()
-                      .dyn_cast<dialect::DenseTensorType>());
+
+              ir::Type out_type;
+              if (in_i_type.isa<dialect::AllocatedDenseTensorType>()) {
+                out_type = dialect::AllocatedDenseTensorType::get(
+                    ctx,
+                    out_place,
+                    pre_define_op->operand_source(j)
+                        .type()
+                        .dyn_cast<dialect::DenseTensorType>());
+              } else if (in_i_type
+                             .isa<dialect::AllocatedSelectedRowsType>()) {
+                out_type = dialect::AllocatedSelectedRowsType::get(
+                    ctx,
+                    out_place,
+                    pre_define_op->operand_source(j)
+                        .type()
+                        .dyn_cast<dialect::SelectedRowsType>());
+              } else {
+                PADDLE_THROW(phi::errors::Unimplemented(
+                    "builtin.combine Input type only support "
+                    "VectorType<DenseTensorType> and "
+                    "VectorType<SelectedRowsType>"));
+              }
+
               in_i = AddPlaceTransferOp(in_i,
                                         out_type,
                                         place,
@@ -994,7 +1080,8 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
             }
           }
         } else if (new_in_type.isa<ir::VectorType>()) {
-          // do nothing here
+          PADDLE_THROW(phi::errors::Unimplemented(
+              "only support allocated selected tensor type for now"));
         } else {
           PADDLE_THROW(phi::errors::Unimplemented(
               "only support allocated dense tensor type for now"));
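Note on patch 01: the "fake tensors" built by GetFakeTensorList never own memory (the phi::Allocation holds a null pointer and zero bytes); they only carry place/dtype/layout so the kernel key parser can pick a kernel from an ir::Value's compile-time type. A minimal standalone sketch of the SelectedRows flavour, using only public phi types; the helper name and signature are illustrative, not part of the patch:

    #include <memory>
    #include <vector>

    #include "paddle/phi/core/allocator.h"
    #include "paddle/phi/core/dense_tensor.h"
    #include "paddle/phi/core/selected_rows.h"

    // Builds a SelectedRows whose value tensor points at no storage; safe to
    // inspect (place, dtype, dims) but never to read or write.
    std::shared_ptr<phi::SelectedRows> MakeFakeSelectedRows(
        const phi::Place& place, phi::DataType dtype, const phi::DDim& dims) {
      auto holder = std::make_shared<phi::Allocation>(nullptr, 0, place);
      phi::DenseTensorMeta meta(dtype, dims);

      auto sr = std::make_shared<phi::SelectedRows>(std::vector<int64_t>{},
                                                    /*height=*/0);
      // The value tensor carries the meta; rows stay empty for a fake tensor.
      *(sr->mutable_value()) = phi::DenseTensor(holder, meta);
      return sr;
    }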
From b724ce97d23e822639f68d81402bb1f9477cc47b Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Tue, 29 Aug 2023 03:29:13 +0000
Subject: [PATCH 02/10] refine

---
 .../fluid/ir/dialect/op_generator/op_gen.py   |   2 +-
 .../dialect/paddle_dialect/ir/pd_manual_op.cc | 268 +++++++++++++++++-
 .../dialect/paddle_dialect/ir/pd_manual_op.h  |  41 +++
 3 files changed, 307 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/ir/dialect/op_generator/op_gen.py b/paddle/fluid/ir/dialect/op_generator/op_gen.py
index fe2edb2b00ea5..a151ba9dbe107 100644
--- a/paddle/fluid/ir/dialect/op_generator/op_gen.py
+++ b/paddle/fluid/ir/dialect/op_generator/op_gen.py
@@ -172,7 +172,7 @@ class {op_name} : public ir::Op<{op_name}{interfaces}{traits}> {{
     'bool': 'ir::BoolAttribute',
 }
 
-_NO_NEED_GEN_OPS = {'add_n', 'split_grad'}
+_NO_NEED_GEN_OPS = {'add_n', 'add_n_', 'add_n_with_kernel', 'split_grad'}
 
 
 def to_phi_and_fluid_op_name(op_item):
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc
index 64cb1d69b210a..0078919830cc9 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc
+++ b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc
@@ -57,13 +57,18 @@ void AddNOp::Verify() {
             "The size %d of inputs must be equal to 1.", input_size));
     if (auto vec_type = (*this)->operand(0).type().dyn_cast<ir::VectorType>()) {
       for (size_t i = 0; i < vec_type.size(); ++i) {
-        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>(),
+        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
+                           vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
                        phi::errors::PreconditionNotMet(
                            "Type validation failed for the 0th input."));
       }
     } else {
       PADDLE_ENFORCE(
-          (*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>(),
+          (*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>() ||
+              (*this)
+                  ->operand(0)
+                  .type()
+                  .isa<paddle::dialect::SelectedRowsType>(),
           phi::errors::PreconditionNotMet(
               "Type validation failed for the 0th input."));
     }
@@ -81,7 +86,8 @@ void AddNOp::Verify() {
         phi::errors::PreconditionNotMet(
             "The size %d of outputs must be equal to 1.", output_size));
     PADDLE_ENFORCE(
-        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>(),
+        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
+            (*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
         phi::errors::PreconditionNotMet(
             "Type validation failed for the 0th output."));
 }
@@ -146,6 +152,262 @@ void AddNOp::InferMeta(phi::InferMetaContext *infer_meta) {
   fn(infer_meta);
 }
 
+OpInfoTuple AddN_Op::GetOpInfo() {
+  std::vector<paddle::dialect::OpInputInfo> inputs = {
+      paddle::dialect::OpInputInfo(
+          "inputs",
+          "ir::VectorType<paddle::dialect::DenseTensorType>",
+          false,
+          false,
+          false)};
+  std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
+  std::vector<paddle::dialect::OpOutputInfo> outputs = {
+      paddle::dialect::OpOutputInfo(
+          "out", "paddle::dialect::DenseTensorType", false, false)};
+  paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
+      "AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
+  return std::make_tuple(inputs, attributes, outputs, run_time_info, "add_n_");
+}
+
+void AddN_Op::Build(ir::Builder &builder,
+                    ir::OperationArgument &argument,
+                    ir::OpResult inputs_) {
+  VLOG(4) << "Builder construction inputs";
+  std::vector<ir::OpResult> argument_inputs = {inputs_};
+  argument.AddOperands(argument_inputs.begin(), argument_inputs.end());
+
+  VLOG(4) << "Builder construction attributes";
+
+  VLOG(4) << "Builder construction outputs";
+  ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
+  (void)inputs;
+  std::vector<phi::DenseTensor> vec_dense_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
+    vec_dense_inputs.push_back(phi::DenseTensor(
+        std::make_unique<paddle::experimental::DefaultAllocator>(
+            paddle::platform::CPUPlace())
+            .get(),
+        phi::DenseTensorMeta(
+            paddle::dialect::TransToPhiDataType(
+                inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
+            inputs[i]
+                .dyn_cast<paddle::dialect::DenseTensorType>()
+                .data_layout(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
+  }
+  std::vector<phi::MetaTensor> vec_meta_inputs;
+  for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
+    vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
+  }
+
+  std::vector<const phi::MetaTensor *> meta_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
+    meta_inputs.push_back(&vec_meta_inputs[i]);
+  }
+  phi::DenseTensor dense_out;
+  phi::MetaTensor meta_out(&dense_out);
+
+  phi::AddNInferMeta(meta_inputs, &meta_out);
+
+  std::vector<ir::Type> argument_outputs;
+  ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
+      ir::IrContext::Instance(),
+      paddle::dialect::TransToIrDataType(dense_out.dtype()),
+      dense_out.dims(),
+      dense_out.layout(),
+      dense_out.lod(),
+      dense_out.offset());
+  argument_outputs.push_back(out_dense_tensor_type);
+  argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
+}
+
+void AddN_Op::Verify() {
+  VLOG(4) << "Start Verifying inputs, outputs and attributes for: AddN_Op.";
+  VLOG(4) << "Verifying inputs:";
+  {
+    auto input_size = num_operands();
+    PADDLE_ENFORCE_EQ(
+        input_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of inputs must be equal to 1.", input_size));
+    if (auto vec_type =
+            (*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
+      for (size_t i = 0; i < vec_type.size(); ++i) {
+        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
+                           vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
+                       phi::errors::PreconditionNotMet(
+                           "Type validation failed for the 0th input."));
+      }
+    } else {
+      PADDLE_ENFORCE((*this)->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::DenseTensorType>() ||
+                         (*this)
+                             ->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::SelectedRowsType>(),
+                     phi::errors::PreconditionNotMet(
+                         "Type validation failed for the 0th input."));
+    }
+  }
+  VLOG(4) << "Verifying attributes:";
+  {
+    // Attributes num is 0, not need to check attributes type.
+  }
+  VLOG(4) << "Verifying outputs:";
+  {
+    auto output_size = num_results();
+    PADDLE_ENFORCE_EQ(
+        output_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of outputs must be equal to 1.", output_size));
+    PADDLE_ENFORCE(
+        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
+            (*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
+        phi::errors::PreconditionNotMet(
+            "Type validation failed for the 0th output."));
+  }
+  VLOG(4) << "End Verifying for: AddN_Op.";
+}
+
+void AddN_Op::InferMeta(phi::InferMetaContext *infer_meta) {
+  auto fn = PD_INFER_META(phi::AddNInferMeta);
+  fn(infer_meta);
+}
+
+OpInfoTuple AddNWithKernelOp::GetOpInfo() {
+  std::vector<paddle::dialect::OpInputInfo> inputs = {
+      paddle::dialect::OpInputInfo(
+          "inputs",
+          "ir::VectorType<paddle::dialect::DenseTensorType>",
+          false,
+          false,
+          false)};
+  std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
+  std::vector<paddle::dialect::OpOutputInfo> outputs = {
+      paddle::dialect::OpOutputInfo(
+          "out", "paddle::dialect::DenseTensorType", false, false)};
+  paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
+      "AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
+  return std::make_tuple(
+      inputs, attributes, outputs, run_time_info, "add_n_with_kernel");
+}
+
+void AddNWithKernelOp::Build(ir::Builder &builder,
+                             ir::OperationArgument &argument,
+                             ir::OpResult inputs_) {
+  VLOG(4) << "Builder construction inputs";
+  std::vector<ir::OpResult> argument_inputs = {inputs_};
+  argument.AddOperands(argument_inputs.begin(), argument_inputs.end());
+
+  VLOG(4) << "Builder construction attributes";
+
+  VLOG(4) << "Builder construction outputs";
+  ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
+  (void)inputs;
+  std::vector<phi::DenseTensor> vec_dense_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
+    vec_dense_inputs.push_back(phi::DenseTensor(
+        std::make_unique<paddle::experimental::DefaultAllocator>(
+            paddle::platform::CPUPlace())
+            .get(),
+        phi::DenseTensorMeta(
+            paddle::dialect::TransToPhiDataType(
+                inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
+            inputs[i]
+                .dyn_cast<paddle::dialect::DenseTensorType>()
+                .data_layout(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
+  }
+  std::vector<phi::MetaTensor> vec_meta_inputs;
+  for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
+    vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
+  }
+
+  std::vector<const phi::MetaTensor *> meta_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
+    meta_inputs.push_back(&vec_meta_inputs[i]);
+  }
+  phi::DenseTensor dense_out;
+  phi::MetaTensor meta_out(&dense_out);
+
+  phi::AddNInferMeta(meta_inputs, &meta_out);
+
+  std::vector<ir::Type> argument_outputs;
+  ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
+      ir::IrContext::Instance(),
+      paddle::dialect::TransToIrDataType(dense_out.dtype()),
+      dense_out.dims(),
+      dense_out.layout(),
+      dense_out.lod(),
+      dense_out.offset());
+  argument_outputs.push_back(out_dense_tensor_type);
+  argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
+}
+
+void AddNWithKernelOp::Verify() {
+  VLOG(4) << "Start Verifying inputs, outputs and attributes for: "
+             "AddNWithKernelOp.";
+  VLOG(4) << "Verifying inputs:";
+  {
+    auto input_size = num_operands();
+    PADDLE_ENFORCE_EQ(
+        input_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of inputs must be equal to 1.", input_size));
+    if (auto vec_type =
+            (*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
+      for (size_t i = 0; i < vec_type.size(); ++i) {
+        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
+                           vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
+                       phi::errors::PreconditionNotMet(
+                           "Type validation failed for the 0th input."));
+      }
+    } else {
+      PADDLE_ENFORCE((*this)->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::DenseTensorType>() ||
+                         (*this)
+                             ->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::SelectedRowsType>(),
+                     phi::errors::PreconditionNotMet(
+                         "Type validation failed for the 0th input."));
+    }
+  }
+  VLOG(4) << "Verifying attributes:";
+  {
+    // Attributes num is 0, not need to check attributes type.
+  }
+  VLOG(4) << "Verifying outputs:";
+  {
+    auto output_size = num_results();
+    PADDLE_ENFORCE_EQ(
+        output_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of outputs must be equal to 1.", output_size));
+    PADDLE_ENFORCE(
+        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
+            (*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
+        phi::errors::PreconditionNotMet(
+            "Type validation failed for the 0th output."));
+  }
+  VLOG(4) << "End Verifying for: AddNWithKernelOp.";
+}
+
+void AddNWithKernelOp::InferMeta(phi::InferMetaContext *infer_meta) {
+  auto fn = PD_INFER_META(phi::AddNInferMeta);
+  fn(infer_meta);
+}
+
 const char *SplitGradOp::attributes_name[1] = {"axis"};
 
 OpInfoTuple SplitGradOp::GetOpInfo() {
diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h
index fe9beb46012ed..f600cf91aa9d0 100644
--- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h
+++ b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h
@@ -51,6 +51,47 @@ class AddNOp : public ir::Op<AddNOp, OpYamlInfoInterface, InferMetaInterface> {
   static void InferMeta(phi::InferMetaContext *infer_meta);
 };
 
+class AddN_Op : public ir::Op<AddN_Op,
+                              paddle::dialect::OpYamlInfoInterface,
+                              paddle::dialect::InferMetaInterface,
+                              paddle::dialect::InplaceTrait> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd.add_n_"; }
+  static constexpr const char **attributes_name = nullptr;
+  static constexpr uint32_t attributes_num = 0;
+  static OpInfoTuple GetOpInfo();
+  static void Build(ir::Builder &builder,             // NOLINT
+                    ir::OperationArgument &argument,  // NOLINT
+                    ir::OpResult inputs_);
+
+  void Verify();
+  ir::Value inputs() { return operand_source(0); }
+  ir::OpResult out() { return result(0); }
+
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
+class AddNWithKernelOp : public ir::Op<AddNWithKernelOp,
+                                       paddle::dialect::OpYamlInfoInterface,
+                                       paddle::dialect::InferMetaInterface> {
+ public:
+  using Op::Op;
+  static const char *name() { return "pd.add_n_with_kernel"; }
+  static constexpr const char **attributes_name = nullptr;
+  static constexpr uint32_t attributes_num = 0;
+  static OpInfoTuple GetOpInfo();
+  static void Build(ir::Builder &builder,             // NOLINT
+                    ir::OperationArgument &argument,  // NOLINT
+                    ir::OpResult inputs_);
+
+  void Verify();
+  ir::Value inputs() { return operand_source(0); }
+  ir::OpResult out() { return result(0); }
+
+  static void InferMeta(phi::InferMetaContext *infer_meta);
+};
+
 class SplitGradOp : public ir::Op<SplitGradOp, OpYamlInfoInterface, InferMetaInterface> {
  public:
   using Op::Op;
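Note on patch 02: both Build() functions derive the result type the same way; they wrap each operand's static type into an empty phi::DenseTensor, view those through phi::MetaTensor, and let phi::AddNInferMeta fill in the output meta. Condensed into a standalone helper (ours, assuming only public phi infermeta APIs):

    #include <vector>

    #include "paddle/phi/core/dense_tensor.h"
    #include "paddle/phi/core/meta_tensor.h"
    #include "paddle/phi/infermeta/multiary.h"

    // Infer the result dims of add_n for a list of input dims at compile time,
    // without allocating any real tensor storage.
    phi::DDim InferAddNOutDims(const std::vector<phi::DDim>& in_dims,
                               phi::DataType dtype) {
      std::vector<phi::DenseTensor> dense_inputs(in_dims.size());
      std::vector<phi::MetaTensor> metas;
      for (size_t i = 0; i < in_dims.size(); ++i) {
        dense_inputs[i].set_meta(phi::DenseTensorMeta(dtype, in_dims[i]));
        metas.emplace_back(&dense_inputs[i]);
      }
      std::vector<const phi::MetaTensor*> meta_ptrs;
      for (auto& m : metas) meta_ptrs.push_back(&m);

      phi::DenseTensor dense_out;
      phi::MetaTensor meta_out(&dense_out);
      phi::AddNInferMeta(meta_ptrs, &meta_out);  // writes dtype/dims into out
      return dense_out.dims();
    }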
#include "paddle/ir/core/builder.h" From 0fcae1104e0e5d46b5a3ab1483601a2728a0775c Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Tue, 29 Aug 2023 06:49:52 +0000 Subject: [PATCH 04/10] refine --- paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h | 2 ++ test/white_list/new_ir_op_test_white_list | 2 ++ 2 files changed, 4 insertions(+) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h index f3aa77d70d58e..dc4bea5295f52 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h +++ b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.h @@ -121,5 +121,7 @@ class SplitGradOp : public ir::Op { IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp) IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp) +IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op) +IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp) #endif diff --git a/test/white_list/new_ir_op_test_white_list b/test/white_list/new_ir_op_test_white_list index 57d6154bfc7d5..d8346ff61b999 100644 --- a/test/white_list/new_ir_op_test_white_list +++ b/test/white_list/new_ir_op_test_white_list @@ -195,3 +195,5 @@ test_where_op test_yolo_box_op test_yolov3_loss_op test_fill_constant_op +test_simnet +test_simnet_v2 From 2d9a90bcf42bca4c286f32bb452fcc50706edecf Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Tue, 29 Aug 2023 07:15:46 +0000 Subject: [PATCH 05/10] refine --- paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc | 5 ++++- paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc | 2 ++ paddle/fluid/ir_adaptor/translator/op_translator.cc | 4 ++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc index 19b8b133559b7..9d24dcd277884 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc +++ b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_dialect.cc @@ -48,7 +48,10 @@ void PaddleDialect::initialize() { #define GET_OP_LIST #include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h" // NOLINT >(); - RegisterOps(); + RegisterOps(); RegisterInterfaces(); } diff --git a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc index 0078919830cc9..c191cc7dc492d 100644 --- a/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc +++ b/paddle/fluid/ir/dialect/paddle_dialect/ir/pd_manual_op.cc @@ -626,3 +626,5 @@ void SplitGradOp::InferMeta(phi::InferMetaContext *infer_meta) { IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp) IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp) +IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op) +IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp) diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index cad900ee7f6da..39dc36acb2a60 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -1112,8 +1112,8 @@ struct AddNOpTranscriber : public OpTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( - "Op assign_value should have corresponding OpInfo pd.assign_value_"); + IR_THROW("Op assign_value should have corresponding OpInfo %s", + target_op_name); } return op_info; From d2aa75661bc3e69fa771d171e438535a4c136b91 Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Wed, 30 Aug 2023 10:59:12 +0000 Subject: 
From d2aa75661bc3e69fa771d171e438535a4c136b91 Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Wed, 30 Aug 2023 10:59:12 +0000
Subject: [PATCH 06/10] refine

---
 paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
index 808cf8b4cc59b..832c8f315f326 100644
--- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
+++ b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
@@ -178,12 +178,12 @@ std::vector<std::shared_ptr<phi::TensorBase>> GetFakeTensorList(
   } else if (input_type.isa<ir::VectorType>()) {
     auto vec_inner_types = input_type.dyn_cast<ir::VectorType>().data();
     for (size_t i = 0; i < vec_inner_types.size(); ++i) {
-      if (vec_inner_types[i].isa<dialect::AllocatedDenseTensorType>()) {
+      if (vec_inner_types[0].isa<dialect::AllocatedDenseTensorType>()) {
         vec_res.push_back(build_fake_dense_tensor(
-            vec_inner_types[i].dyn_cast<dialect::AllocatedDenseTensorType>()));
+            vec_inner_types[0].dyn_cast<dialect::AllocatedDenseTensorType>()));
-      } else if (vec_inner_types[i].isa<dialect::AllocatedSelectedRowsType>()) {
+      } else if (vec_inner_types[0].isa<dialect::AllocatedSelectedRowsType>()) {
         vec_res.push_back(build_fake_selected_rows(
-            vec_inner_types[i].dyn_cast<dialect::AllocatedSelectedRowsType>()));
+            vec_inner_types[0].dyn_cast<dialect::AllocatedSelectedRowsType>()));
       }
     }
   }

From 7a041a322227a6373905f44807157bd9ddc43566 Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Thu, 31 Aug 2023 06:16:55 +0000
Subject: [PATCH 07/10] merge

---
 .../ir/transforms/pd_op_to_kernel_pass.cc     | 51 -------------------
 1 file changed, 51 deletions(-)

diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
index 5a9225afbba13..1ec501eb4fb5c 100644
--- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
+++ b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
@@ -638,57 +638,6 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
     }
     // Copy op output type
     std::vector<ir::Type> op_output_types;
-    if (op_item->num_results() > 0) {
-      for (size_t i = 0; i < op_item->num_results(); ++i) {
-        auto result_type = op_item->result(i).type();
-        if (!result_type) {
-          op_output_types.push_back(result_type);
-        } else if (result_type.isa<ir::VectorType>()) {
-          std::vector<ir::Type> vec_inner_types;
-          auto base_types = result_type.dyn_cast<ir::VectorType>().data();
-          for (size_t idx = 0; idx < base_types.size(); idx++) {
-            auto& base_type = base_types[idx];
-            if (base_type) {
-              if (base_type.isa<dialect::DenseTensorType>()) {
-                auto allocated_dense_tensor_dtype =
-                    paddle::dialect::AllocatedDenseTensorType::get(
-                        ctx,
-                        out_places[idx],
-                        base_type.dyn_cast<dialect::DenseTensorType>());
-                vec_inner_types.push_back(allocated_dense_tensor_dtype);
-              } else if (base_type.isa<dialect::SelectedRowsType>()) {
-                auto allocated_dense_tensor_dtype =
-                    paddle::dialect::AllocatedSelectedRowsType::get(
-                        ctx,
-                        out_places[idx],
-                        base_type.dyn_cast<dialect::SelectedRowsType>());
-                vec_inner_types.push_back(allocated_dense_tensor_dtype);
-              } else {
-                PADDLE_THROW(phi::errors::Unimplemented(
-                    "only support dense tensor in vector type for now"));
-              }
-            } else {
-              // NOTE(phlrain), kernel not support a nullptr in output
-              ir::Type fp32_dtype = ir::Float32Type::get(ctx);
-              phi::DDim dims = {};
-              phi::DataLayout data_layout = phi::DataLayout::NCHW;
-              phi::LoD lod = {{}};
-              size_t offset = 0;
-              auto dense_tensor_dtype = paddle::dialect::DenseTensorType::get(
-                  ctx, fp32_dtype, dims, data_layout, lod, offset);
-              vec_inner_types.push_back(dense_tensor_dtype);
-            }
-          }
-          ir::Type t1 = ir::VectorType::get(ctx, vec_inner_types);
-          op_output_types.push_back(t1);
-        } else {
-          PADDLE_THROW(phi::errors::Unimplemented(
-              "builtin.combine Result type only support "
-              "VectorType<DenseTensorType> and "
-              "VectorType<SelectedRowsType>"));
-        }
-      }
-    }
 
           ir::Type t1 = ir::VectorType::get(ctx, vec_inner_types);
           op_output_types.push_back(t1);

From d86b6f832cf413c24ddeab4cdf6076ed64eeabc5 Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Fri, 1 Sep 2023 06:49:59 +0000
Subject: [PATCH 08/10] refine

---
 test/dygraph_to_static/test_simnet.py     | 2 ++
 test/dygraph_to_static/test_simnet_v2.py  | 2 ++
 test/white_list/new_ir_op_test_white_list | 2 --
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/test/dygraph_to_static/test_simnet.py b/test/dygraph_to_static/test_simnet.py
index 2c69cf2072cf9..ba0460f122483 100644
--- a/test/dygraph_to_static/test_simnet.py
+++ b/test/dygraph_to_static/test_simnet.py
@@ -17,6 +17,7 @@
 import unittest
 
 import numpy as np
+from dygraph_to_static_util import test_and_compare_with_new_ir
 from simnet_dygraph_model import BOW, HingeLoss
 
 import paddle
@@ -120,6 +121,7 @@ def __len__(self):
 )
 
 
+@test_and_compare_with_new_ir(True)
 def train(conf_dict, to_static):
     """
     train process
diff --git a/test/dygraph_to_static/test_simnet_v2.py b/test/dygraph_to_static/test_simnet_v2.py
index a49cc23af11f8..10333e0695b98 100644
--- a/test/dygraph_to_static/test_simnet_v2.py
+++ b/test/dygraph_to_static/test_simnet_v2.py
@@ -17,6 +17,7 @@
 import unittest
 
 import numpy as np
+from dygraph_to_static_util import test_and_compare_with_new_ir
 from simnet_dygraph_model_v2 import BOW, HingeLoss
 
 import paddle
@@ -119,6 +120,7 @@ def __len__(self):
 )
 
 
+@test_and_compare_with_new_ir(True)
 def train(conf_dict, to_static):
     """
     train process
diff --git a/test/white_list/new_ir_op_test_white_list b/test/white_list/new_ir_op_test_white_list
index d8346ff61b999..57d6154bfc7d5 100644
--- a/test/white_list/new_ir_op_test_white_list
+++ b/test/white_list/new_ir_op_test_white_list
@@ -195,5 +195,3 @@ test_where_op
 test_yolo_box_op
 test_yolov3_loss_op
 test_fill_constant_op
-test_simnet
-test_simnet_v2

From bc2475e7d8a006b605848771c8e5666f98c08fcb Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Tue, 5 Sep 2023 01:39:04 +0000
Subject: [PATCH 09/10] refine

---
 test/dygraph_to_static/test_simnet.py    | 2 +-
 test/dygraph_to_static/test_simnet_v2.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/dygraph_to_static/test_simnet.py b/test/dygraph_to_static/test_simnet.py
index ba0460f122483..09ea063f9ad8e 100644
--- a/test/dygraph_to_static/test_simnet.py
+++ b/test/dygraph_to_static/test_simnet.py
@@ -121,7 +121,6 @@ def __len__(self):
 )
 
 
-@test_and_compare_with_new_ir(True)
 def train(conf_dict, to_static):
     """
     train process
@@ -178,6 +177,7 @@ def train(conf_dict, to_static):
 
 
 class TestSimnet(unittest.TestCase):
+    @test_and_compare_with_new_ir(True)
     def test_dygraph_static_same_loss(self):
         if fluid.is_compiled_with_cuda():
             fluid.set_flags({"FLAGS_cudnn_deterministic": True})
diff --git a/test/dygraph_to_static/test_simnet_v2.py b/test/dygraph_to_static/test_simnet_v2.py
index 10333e0695b98..316464ab79132 100644
--- a/test/dygraph_to_static/test_simnet_v2.py
+++ b/test/dygraph_to_static/test_simnet_v2.py
@@ -120,7 +120,6 @@ def __len__(self):
 )
 
 
-@test_and_compare_with_new_ir(True)
 def train(conf_dict, to_static):
     """
     train process
@@ -178,6 +177,7 @@ def train(conf_dict, to_static):
 
 
 class TestSimnet(unittest.TestCase):
+    @test_and_compare_with_new_ir(True)
     def test_dygraph_static_same_loss(self):
         if paddle.is_compiled_with_cuda():
             paddle.fluid.set_flags({"FLAGS_cudnn_deterministic": True})

From dde908539bd802da38961d23317f564d98d508d9 Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Tue, 5 Sep 2023 02:49:37 +0000
Subject: [PATCH 10/10] refine

---
 paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
index 0c9b6811ab671..d75c7cc4779ff 100644
--- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
+++ b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc
@@ -1062,8 +1062,7 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
           }
         } else if (new_in_type.isa<ir::VectorType>()) {
-          PADDLE_THROW(phi::errors::Unimplemented(
-              "only support allocated selected tensor type for now"));
+          // do nothing here
         } else {
           PADDLE_THROW(phi::errors::Unimplemented(
               "only support allocated dense tensor type for now"));
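Note on the series as a whole: patch 10 restores the silent pass-through for vector-typed inputs that patch 01 had turned into a throw, while the core SelectedRows dispatch survives. When an add_n result is SelectedRows-typed, the lowering pass swaps the phi kernel name to "add_n_sr" before lookup. A sketch of that final lookup step, assuming phi's public KernelFactory API (the helper is ours):

    #include <string>

    #include "paddle/phi/core/kernel_factory.h"

    // Pick the dense or SelectedRows flavour of add_n for a given kernel key;
    // phi returns an empty Kernel if no matching registration exists.
    phi::Kernel SelectAddNKernel(bool result_is_selected_rows,
                                 const phi::KernelKey& kernel_key) {
      std::string kernel_fn_str =
          result_is_selected_rows ? "add_n_sr" : "add_n";
      return phi::KernelFactory::Instance().SelectKernel(kernel_fn_str,
                                                         kernel_key);
    }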