Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[NewIR] New IR: support builtin slice op #55381

Merged
merged 2 commits into from
Jul 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -957,7 +957,7 @@ void BuildOpFuncList(

if (op_name == "builtin.combine" || op_name == "pd.feed" ||
op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter") {
op_name == "builtin.get_parameter" || op_name == "builtin.slice") {
VLOG(6) << "skip process " << op_name;
continue;
}
Expand All @@ -977,6 +977,7 @@ void BuildOpFuncList(
phi::MetaTensor,
phi::MetaTensor,
paddle::small_vector<phi::MetaTensor, phi::kInputSmallVectorSize>,
paddle::small_vector<phi::MetaTensor, phi::kInputSmallVectorSize>,
false>((*it),
value_2_name_map,
scope,
Expand All @@ -1003,6 +1004,7 @@ void BuildOpFuncList(
const phi::TensorBase*,
phi::TensorBase*,
paddle::small_vector<const phi::TensorBase*>,
paddle::small_vector<phi::TensorBase*>,
true>((*it),
value_2_name_map,
scope,
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/framework/tensor_ref_array.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,11 @@ namespace paddle {
namespace framework {

template <>
struct PhiVectorType<const phi::DenseTensor*> {
const char* type_name = "PhiTensorRefArray";
struct PhiVectorType<const framework::Variable*> {
const char* type_name = "VariableRefArray";
};

using TensorRefArray = PhiVector<const phi::DenseTensor*>;
using VariableRefArray = PhiVector<const framework::Variable*>;

} // namespace framework
} // namespace paddle
2 changes: 1 addition & 1 deletion paddle/fluid/framework/type_info.cc
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ template class TypeInfoTraits<phi::TensorBase, paddle::framework::FeedList>;
template class TypeInfoTraits<phi::TensorBase, egr::VariableCompatTensor>;
template class TypeInfoTraits<phi::TensorBase, paddle::prim::DescTensor>;
template class TypeInfoTraits<phi::TensorBase,
paddle::framework::TensorRefArray>;
paddle::framework::VariableRefArray>;

} // namespace phi
2 changes: 1 addition & 1 deletion paddle/fluid/framework/var_type_traits.h
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ using VarTypeRegistry = detail::VarTypeRegistryImpl<
std::vector<float>,
std::vector<std::string>,
RawTensor,
TensorRefArray>;
VariableRefArray>;
template <typename T>
struct VarTypeTrait {
static_assert(VarTypeRegistry::IsRegistered<T>(), "Must be registered type");
Expand Down
2 changes: 2 additions & 0 deletions paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_adaptor.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ class PhiKernelAdaptor {
phi::MetaTensor,
phi::MetaTensor,
paddle::small_vector<phi::MetaTensor, phi::kInputSmallVectorSize>,
paddle::small_vector<phi::MetaTensor, phi::kInputSmallVectorSize>,
false>((*it), name_map, scope_, nullptr, op_yaml_info_parser, &ctx);

infer_meta_impl->infer_meta_(&ctx);
Expand All @@ -106,6 +107,7 @@ class PhiKernelAdaptor {
const phi::TensorBase*,
phi::TensorBase*,
paddle::small_vector<const phi::TensorBase*>,
paddle::small_vector<phi::TensorBase*>,
true>(
(*it), name_map, scope_, nullptr, op_yaml_info_parser, &kernel_ctx);
kernel_fn(&kernel_ctx);
Expand Down
68 changes: 56 additions & 12 deletions paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,9 @@

namespace ir {

using VariableNameMap =
std::unordered_map<const paddle::framework::Variable*, std::string>;

paddle::framework::Variable* CreateVar(ir::Value value,
const std::string& name,
paddle::framework::Scope* scope,
Expand Down Expand Up @@ -89,6 +92,7 @@ void BuildValue(ir::Value value,
paddle::framework::Scope* scope,
paddle::framework::Scope* local_scope,
std::unordered_map<ir::Value, std::string>* name_map,
VariableNameMap* variable_name_map,
int& count) { // NOLINT
auto inner_local_scope = local_scope != nullptr ? local_scope : scope;
std::string name;
Expand All @@ -107,7 +111,7 @@ void BuildValue(ir::Value value,
} else if (value.type().isa<paddle::dialect::AllocatedSelectedRowsType>()) {
var->GetMutable<phi::SelectedRows>();
} else if (value.type().isa<ir::VectorType>()) {
auto tensor_array = var->GetMutable<paddle::framework::TensorRefArray>();
auto tensor_array = var->GetMutable<paddle::framework::VariableRefArray>();
for (size_t i = 0; i < value.type().dyn_cast<ir::VectorType>().size();
i++) {
PADDLE_ENFORCE(value.type()
Expand All @@ -118,7 +122,9 @@ void BuildValue(ir::Value value,
"DenseTensorType"));
std::string name_i = "inner_var_" + std::to_string(count++);
auto var_i = CreateVar(value, name_i, scope, inner_local_scope);
tensor_array->emplace_back(var_i->GetMutable<phi::DenseTensor>());
var_i->GetMutable<phi::DenseTensor>();
tensor_array->emplace_back(var_i);
variable_name_map->emplace(var_i, name_i);
}
} else {
PADDLE_THROW(phi::errors::PreconditionNotMet(
Expand All @@ -127,6 +133,7 @@ void BuildValue(ir::Value value,
}

void HandleForSpecialOp(ir::Operation* op,
const VariableNameMap& variable_name_map,
paddle::framework::Scope* scope,
paddle::framework::Scope* local_scope,
std::unordered_map<ir::Value, std::string>* name_map,
Expand Down Expand Up @@ -179,7 +186,7 @@ void HandleForSpecialOp(ir::Operation* op,
}

auto var = CreateVar(out_value, name, scope, local_scope);
auto tensor_array = var->GetMutable<paddle::framework::TensorRefArray>();
auto tensor_array = var->GetMutable<paddle::framework::VariableRefArray>();
// clear tensor array
tensor_array->clear();

Expand All @@ -191,8 +198,7 @@ void HandleForSpecialOp(ir::Operation* op,
true,
phi::errors::PreconditionNotMet("can not found input of combine op"));
tensor_array->emplace_back(
&(CreateVar(value, name_map->at(value), scope, local_scope)
->Get<phi::DenseTensor>()));
CreateVar(value, name_map->at(value), scope, local_scope));
}
}

Expand Down Expand Up @@ -222,6 +228,34 @@ void HandleForSpecialOp(ir::Operation* op,
auto out_ptr = op->result(0);
name_map->emplace(out_ptr, param_name);
}

if (op_name == "builtin.slice") {
VLOG(6) << "Handle for builtin.slice";
auto out_value = op->result(0);

auto in_value = op->operand(0);

PADDLE_ENFORCE_EQ(name_map->count(in_value),
true,
phi::errors::PreconditionNotMet(
"input of buildin slice not in name map"));

int index =
op->attributes().at("index").dyn_cast<ir::Int32Attribute>().data();
auto in_var = scope->FindVar(name_map->at(in_value));
auto variable_array = in_var->Get<paddle::framework::VariableRefArray>();

PADDLE_ENFORCE_EQ(
variable_name_map.count(variable_array[index]),
true,
phi::errors::PreconditionNotMet("[%d] the variable in build slice "
"input MUST in variable name map",
index));

std::string var_name = variable_name_map.at(variable_array[index]);

name_map->emplace(out_value, var_name);
}
}

void HandleForInplaceOp(ir::Operation* op,
Expand All @@ -241,7 +275,7 @@ void HandleForInplaceOp(ir::Operation* op,
paddle::dialect::OpYamlInfoParser yaml_parser(
op_info.GetInterfaceImpl<paddle::dialect::OpYamlInfoInterface>()
->get_op_info_());

VariableNameMap variable_name_map;
for (size_t i = 0; i < op->num_results(); ++i) {
ir::Value value = op->result(i);
std::string value_name = yaml_parser.OutputNames()[i];
Expand All @@ -254,7 +288,8 @@ void HandleForInplaceOp(ir::Operation* op,
<< " (var: " << var_name << ")";
name_map->emplace(value, var_name);
} else {
BuildValue(value, scope, local_scope, name_map, count);
BuildValue(
value, scope, local_scope, name_map, &variable_name_map, count);
}
}
}
Expand All @@ -272,8 +307,11 @@ void BuildScope(const ir::Block& block,
VLOG(6) << "Build: scope [" << scope << "] inner_local_scope ["
<< inner_local_scope << "]";

std::unordered_map<const paddle::framework::Variable*, std::string>
variable_name_map;

// int count = name_map->size();
int count = inner_local_scope->Size();
int count = name_map->size();
for (auto it = block.begin(); it != block.end(); ++it) {
ir::Operation* op = *it;

Expand All @@ -287,9 +325,10 @@ void BuildScope(const ir::Block& block,

if (op_name == "pd.feed" || op_name == "pd.fetch" ||
op_name == "builtin.combine" || op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter") {
VLOG(4) << "HandleForSpecialOp: " << op_name;
HandleForSpecialOp(op, scope, inner_local_scope, name_map, count);
op_name == "builtin.get_parameter" || op_name == "builtin.slice") {
VLOG(6) << "HandleForSpecialOp: " << op_name;
HandleForSpecialOp(
op, variable_name_map, scope, inner_local_scope, name_map, count);
continue;
}

Expand All @@ -305,7 +344,12 @@ void BuildScope(const ir::Block& block,
continue;
} else {
for (size_t i = 0; i < op->num_results(); ++i) {
BuildValue(op->result(i), scope, local_scope, name_map, count);
BuildValue(op->result(i),
scope,
local_scope,
name_map,
&variable_name_map,
count);
}
}
}
Expand Down
39 changes: 27 additions & 12 deletions paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,8 @@ void BuildScope(const ir::Block& block,
template <typename Context,
typename InType,
typename OutType,
typename ListType,
typename InListType,
typename OutListType,
bool is_kernel>
void BuildPhiContext(
ir::Operation* op,
Expand Down Expand Up @@ -121,11 +122,12 @@ void BuildPhiContext(
if (var->IsType<phi::DenseTensor>()) {
const phi::TensorBase* tensor_in = &(var->Get<phi::DenseTensor>());
ctx->EmplaceBackInput(InType(tensor_in));
} else if (var->IsType<paddle::framework::TensorRefArray>()) {
ListType inputs;
auto& tensor_array = var->Get<paddle::framework::TensorRefArray>();
for (size_t i = 0; i < tensor_array.size(); ++i) {
inputs.emplace_back(InType(tensor_array[i]));
} else if (var->IsType<paddle::framework::VariableRefArray>()) {
InListType inputs;
auto& variable_array = var->Get<paddle::framework::VariableRefArray>();
for (size_t i = 0; i < variable_array.size(); ++i) {
inputs.emplace_back(InType(const_cast<phi::DenseTensor*>(
&(variable_array[i]->Get<phi::DenseTensor>()))));
}
ctx->EmplaceBackInputs(inputs);
} else {
Expand Down Expand Up @@ -157,18 +159,21 @@ void BuildPhiContext(
VLOG(6) << "ctx->EmplaceBack mutable attr: " << t << "\t" << in_var_name;
if (tensor_attr_type == "paddle::dialect::IntArrayAttribute") {
if (ptr.type().isa<paddle::dialect::AllocatedDenseTensorType>()) {
phi::Attribute r1 = phi::TensorRef(
phi::Attribute attr = phi::TensorRef(
&(inner_scope->FindVar(in_var_name)->Get<phi::DenseTensor>()));
ctx->EmplaceBackAttr(r1);
ctx->EmplaceBackAttr(attr);
} else if (ptr.type().isa<ir::VectorType>()) {
auto& tensor_array = inner_scope->FindVar(in_var_name)
->Get<paddle::framework::TensorRefArray>();
->Get<paddle::framework::VariableRefArray>();
if (tensor_array.size() == 1) {
ctx->EmplaceBackAttr(phi::TensorRef(tensor_array[0]));
phi::Attribute attr =
phi::TensorRef(&(tensor_array[0]->Get<phi::DenseTensor>()));
ctx->EmplaceBackAttr(attr);
} else {
std::vector<phi::TensorRef> vec_ref;
for (size_t i = 0; i < tensor_array.size(); ++i) {
vec_ref.emplace_back(phi::TensorRef(tensor_array[i]));
vec_ref.emplace_back(
phi::TensorRef(&(tensor_array[i]->Get<phi::DenseTensor>())));
}
ctx->EmplaceBackAttr(vec_ref);
}
Expand Down Expand Up @@ -328,8 +333,18 @@ void BuildPhiContext(
} else if (out_type.isa<paddle::dialect::AllocatedSelectedRowsType>()) {
ctx->EmplaceBackOutput(OutType(const_cast<phi::SelectedRows*>(
&(scope->Var(name)->Get<phi::SelectedRows>()))));
} else if (out_type.isa<ir::VectorType>()) {
OutListType outputs;
auto& variable_array =
scope->Var(name)->Get<paddle::framework::VariableRefArray>();
for (size_t i = 0; i < variable_array.size(); ++i) {
outputs.emplace_back(OutType(const_cast<phi::DenseTensor*>(
&(variable_array[i]->Get<phi::DenseTensor>()))));
}
ctx->EmplaceBackOutputs(outputs);
} else {
PADDLE_THROW("not support type");
PADDLE_THROW(
phi::errors::Unimplemented("only support DenseTensor and vector "));
}

if (output_map != nullptr) {
Expand Down
Loading