[NewIR] add_n and combine support selected rows #56754
Merged: wanghuancoder merged 13 commits into PaddlePaddle:develop from wanghuancoder:simnet_support_sr on Sep 6, 2023.
Changes from all commits (13 commits, all by wanghuancoder):
0b7cd67 add_n and combine support selected rows
b724ce9 refine
0c95a12 refine
0fcae11 refine
2d9a90b refine
839a94e Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
d2aa756 refine
bfcd84f merge
7a041a3 merge
d86b6f8 refine
092ea4a Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
bc2475e refine
dde9085 refine
@@ -57,13 +57,18 @@ void AddNOp::Verify() {
             "The size %d of inputs must be equal to 1.", input_size));
    if (auto vec_type = (*this)->operand(0).type().dyn_cast<ir::VectorType>()) {
      for (size_t i = 0; i < vec_type.size(); ++i) {
-        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>(),
+        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
+                           vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
                       phi::errors::PreconditionNotMet(
                           "Type validation failed for the 0th input."));
      }
    } else {
      PADDLE_ENFORCE(
-          (*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>(),
+          (*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>() ||
+              (*this)
+                  ->operand(0)
+                  .type()
+                  .isa<paddle::dialect::SelectedRowsType>(),
          phi::errors::PreconditionNotMet(
              "Type validation failed for the 0th input."));
    }

@@ -81,7 +86,8 @@ void AddNOp::Verify() {
        phi::errors::PreconditionNotMet(
            "The size %d of outputs must be equal to 1.", output_size));
    PADDLE_ENFORCE(
-        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>(),
+        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
+            (*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
        phi::errors::PreconditionNotMet(
            "Type validation failed for the 0th output."));
  }

@@ -146,6 +152,262 @@ void AddNOp::InferMeta(phi::InferMetaContext *infer_meta) {
  fn(infer_meta);
}

+OpInfoTuple AddN_Op::GetOpInfo() {
+  std::vector<paddle::dialect::OpInputInfo> inputs = {
+      paddle::dialect::OpInputInfo(
+          "inputs",
+          "ir::VectorType<paddle::dialect::DenseTensorType>",
+          false,
+          false,
+          false)};
+  std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
+  std::vector<paddle::dialect::OpOutputInfo> outputs = {
+      paddle::dialect::OpOutputInfo(
+          "out", "paddle::dialect::DenseTensorType", false, false)};
+  paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
+      "AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
+  return std::make_tuple(inputs, attributes, outputs, run_time_info, "add_n_");
+}
+
+void AddN_Op::Build(ir::Builder &builder,
+                    ir::OperationArgument &argument,
+                    ir::OpResult inputs_) {
+  VLOG(4) << "Builder construction inputs";
+  std::vector<ir::OpResult> argument_inputs = {inputs_};
+  argument.AddOperands(argument_inputs.begin(), argument_inputs.end());
+
+  VLOG(4) << "Builder construction attributes";
+
+  VLOG(4) << "Builder construction outputs";
+  ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
+  (void)inputs;
+  std::vector<phi::DenseTensor> vec_dense_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
+    vec_dense_inputs.push_back(phi::DenseTensor(
+        std::make_unique<paddle::experimental::DefaultAllocator>(
+            paddle::platform::CPUPlace())
+            .get(),
+        phi::DenseTensorMeta(
+            paddle::dialect::TransToPhiDataType(
+                inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
+            inputs[i]
+                .dyn_cast<paddle::dialect::DenseTensorType>()
+                .data_layout(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
+  }
+  std::vector<phi::MetaTensor> vec_meta_inputs;
+  for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
+    vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
+  }
+
+  std::vector<const phi::MetaTensor *> meta_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
+    meta_inputs.push_back(&vec_meta_inputs[i]);
+  }
+  phi::DenseTensor dense_out;
+  phi::MetaTensor meta_out(&dense_out);
+
+  phi::AddNInferMeta(meta_inputs, &meta_out);
+
+  std::vector<ir::Type> argument_outputs;
+  ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
+      ir::IrContext::Instance(),
+      paddle::dialect::TransToIrDataType(dense_out.dtype()),
+      dense_out.dims(),
+      dense_out.layout(),
+      dense_out.lod(),
+      dense_out.offset());
+  argument_outputs.push_back(out_dense_tensor_type);
+  argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
+}
+
+void AddN_Op::Verify() {
+  VLOG(4) << "Start Verifying inputs, outputs and attributes for: AddN_Op.";
+  VLOG(4) << "Verifying inputs:";
+  {
+    auto input_size = num_operands();
+    PADDLE_ENFORCE_EQ(
+        input_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of inputs must be equal to 1.", input_size));
+    if (auto vec_type =
+            (*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
+      for (size_t i = 0; i < vec_type.size(); ++i) {
+        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
+                           vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
+                       phi::errors::PreconditionNotMet(
+                           "Type validation failed for the 0th input."));
+      }
+    } else {
+      PADDLE_ENFORCE((*this)->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::DenseTensorType>() ||
+                         (*this)
+                             ->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::SelectedRowsType>(),
+                     phi::errors::PreconditionNotMet(
+                         "Type validation failed for the 0th input."));
+    }
+  }
+  VLOG(4) << "Verifying attributes:";
+  {
+    // Attributes num is 0, not need to check attributes type.
+  }
+  VLOG(4) << "Verifying outputs:";
+  {
+    auto output_size = num_results();
+    PADDLE_ENFORCE_EQ(
+        output_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of outputs must be equal to 1.", output_size));
+    PADDLE_ENFORCE(
+        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
+            (*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
+        phi::errors::PreconditionNotMet(
+            "Type validation failed for the 0th output."));
+  }
+  VLOG(4) << "End Verifying for: AddN_Op.";
+}
+
+void AddN_Op::InferMeta(phi::InferMetaContext *infer_meta) {
+  auto fn = PD_INFER_META(phi::AddNInferMeta);
+  fn(infer_meta);
+}
+
+OpInfoTuple AddNWithKernelOp::GetOpInfo() {
+  std::vector<paddle::dialect::OpInputInfo> inputs = {
+      paddle::dialect::OpInputInfo(
+          "inputs",
+          "ir::VectorType<paddle::dialect::DenseTensorType>",
+          false,
+          false,
+          false)};
+  std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
+  std::vector<paddle::dialect::OpOutputInfo> outputs = {
+      paddle::dialect::OpOutputInfo(
+          "out", "paddle::dialect::DenseTensorType", false, false)};
+  paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
+      "AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
+  return std::make_tuple(
+      inputs, attributes, outputs, run_time_info, "add_n_with_kernel");
+}
+
+void AddNWithKernelOp::Build(ir::Builder &builder,
+                             ir::OperationArgument &argument,
+                             ir::OpResult inputs_) {
+  VLOG(4) << "Builder construction inputs";
+  std::vector<ir::OpResult> argument_inputs = {inputs_};
+  argument.AddOperands(argument_inputs.begin(), argument_inputs.end());
+
+  VLOG(4) << "Builder construction attributes";
+
+  VLOG(4) << "Builder construction outputs";
+  ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
+  (void)inputs;
[Review comment on the line above: "Same as above."]
+  std::vector<phi::DenseTensor> vec_dense_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
+    vec_dense_inputs.push_back(phi::DenseTensor(
+        std::make_unique<paddle::experimental::DefaultAllocator>(
+            paddle::platform::CPUPlace())
+            .get(),
+        phi::DenseTensorMeta(
+            paddle::dialect::TransToPhiDataType(
+                inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
+            inputs[i]
+                .dyn_cast<paddle::dialect::DenseTensorType>()
+                .data_layout(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
+            inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
+  }
+  std::vector<phi::MetaTensor> vec_meta_inputs;
+  for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
+    vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
+  }
+
+  std::vector<const phi::MetaTensor *> meta_inputs;
+  for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
+    meta_inputs.push_back(&vec_meta_inputs[i]);
+  }
+  phi::DenseTensor dense_out;
+  phi::MetaTensor meta_out(&dense_out);
+
+  phi::AddNInferMeta(meta_inputs, &meta_out);
+
+  std::vector<ir::Type> argument_outputs;
+  ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
+      ir::IrContext::Instance(),
+      paddle::dialect::TransToIrDataType(dense_out.dtype()),
+      dense_out.dims(),
+      dense_out.layout(),
+      dense_out.lod(),
+      dense_out.offset());
+  argument_outputs.push_back(out_dense_tensor_type);
+  argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
+}
+
+void AddNWithKernelOp::Verify() {
+  VLOG(4) << "Start Verifying inputs, outputs and attributes for: "
+             "AddNWithKernelOp.";
+  VLOG(4) << "Verifying inputs:";
+  {
+    auto input_size = num_operands();
+    PADDLE_ENFORCE_EQ(
+        input_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of inputs must be equal to 1.", input_size));
+    if (auto vec_type =
+            (*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
+      for (size_t i = 0; i < vec_type.size(); ++i) {
+        PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
+                           vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
+                       phi::errors::PreconditionNotMet(
+                           "Type validation failed for the 0th input."));
+      }
+    } else {
+      PADDLE_ENFORCE((*this)->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::DenseTensorType>() ||
+                         (*this)
+                             ->operand_source(0)
+                             .type()
+                             .isa<paddle::dialect::SelectedRowsType>(),
+                     phi::errors::PreconditionNotMet(
+                         "Type validation failed for the 0th input."));
+    }
+  }
+  VLOG(4) << "Verifying attributes:";
+  {
+    // Attributes num is 0, not need to check attributes type.
+  }
+  VLOG(4) << "Verifying outputs:";
+  {
+    auto output_size = num_results();
+    PADDLE_ENFORCE_EQ(
+        output_size,
+        1u,
+        phi::errors::PreconditionNotMet(
+            "The size %d of outputs must be equal to 1.", output_size));
+    PADDLE_ENFORCE(
+        (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
+            (*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
+        phi::errors::PreconditionNotMet(
+            "Type validation failed for the 0th output."));
+  }
+  VLOG(4) << "End Verifying for: AddNWithKernelOp.";
+}
+
+void AddNWithKernelOp::InferMeta(phi::InferMetaContext *infer_meta) {
+  auto fn = PD_INFER_META(phi::AddNInferMeta);
+  fn(infer_meta);
+}
+
 const char *SplitGradOp::attributes_name[1] = {"axis"};

 OpInfoTuple SplitGradOp::GetOpInfo() {

@@ -364,3 +626,5 @@ void SplitGradOp::InferMeta(phi::InferMetaContext *infer_meta) {

 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
 IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp)
+IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op)
+IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp)
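
The essential change running through the diff above is that every operand and result check of the form isa<paddle::dialect::DenseTensorType>() is relaxed to also accept paddle::dialect::SelectedRowsType. The following is a minimal, self-contained sketch of that relaxed verification pattern; the Type, DenseTensorType, and SelectedRowsType classes here are mock stand-ins written for this illustration, not Paddle's actual ir or dialect classes.

#include <iostream>
#include <memory>
#include <stdexcept>
#include <vector>

// Mock type hierarchy standing in for ir::Type and the dialect tensor types.
struct Type {
  virtual ~Type() = default;
  // Mimics ir::Type::isa<T>() using dynamic_cast.
  template <typename T>
  bool isa() const { return dynamic_cast<const T *>(this) != nullptr; }
};
struct DenseTensorType : Type {};
struct SelectedRowsType : Type {};
struct StringType : Type {};  // An unsupported type, for the negative case.

// Relaxed check: an add_n operand may be a dense tensor OR selected rows.
void VerifyAddNOperand(const Type &t) {
  if (!(t.isa<DenseTensorType>() || t.isa<SelectedRowsType>())) {
    throw std::runtime_error("Type validation failed for the 0th input.");
  }
}

int main() {
  std::vector<std::unique_ptr<Type>> operands;
  operands.push_back(std::make_unique<DenseTensorType>());
  operands.push_back(std::make_unique<SelectedRowsType>());
  for (const auto &op : operands) {
    VerifyAddNOperand(*op);  // Both kinds now pass verification.
  }
  try {
    StringType bad;
    VerifyAddNOperand(bad);  // Any other type is still rejected.
  } catch (const std::exception &e) {
    std::cout << "rejected: " << e.what() << "\n";
  }
  std::cout << "dense tensor and selected rows operands accepted\n";
  return 0;
}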
Review conversation:
"This line should not be needed."
"OK, I will merge this first and remove that line afterwards."
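
For context on the review exchange above: the (void)inputs; statement in the Build functions is the usual C++ idiom for marking a variable as deliberately unused, and because inputs is read in the loop immediately afterwards, the cast is redundant, which appears to be what the reviewer is pointing out. A small standalone illustration of the idiom follows (example code written for this note, not taken from the PR).

// The (void)x; idiom marks a variable as intentionally "used" so the
// compiler does not emit an unused-variable warning. It is only needed
// when the variable is otherwise never read.
int main() {
  int unused_flag = 0;
  (void)unused_flag;  // Suppresses the warning; has no runtime effect.

  int count = 3;
  (void)count;        // Redundant: count is read below anyway.
  return count * 0;
}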