Replace framework::LoDTensorArray part2 [fluid_ops] (#66913)
co63oc authored Aug 5, 2024
1 parent 5914bd9 commit 7174d1b
Showing 34 changed files with 164 additions and 166 deletions.
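Every file shown below applies one of three mechanical renames, replacing fluid-era aliases with their phi/common equivalents: framework::LoDTensorArray → phi::TensorArray in variable type checks, platform::demangle → common::demangle in error-message helpers, and platform::RecordEvent → phi::RecordEvent in profiler scopes. The short sketches interleaved after some file sections below illustrate these patterns; they are editorial examples, not part of the commit.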
2 changes: 1 addition & 1 deletion paddle/fluid/framework/attribute.cc
@@ -61,7 +61,7 @@ paddle::any GetAttrValue(const Attribute& attr) {
default:
PADDLE_THROW(common::errors::Unimplemented(
"Unsupported Attribute value type `%s` for phi.",
- platform::demangle(attr.type().name())));
+ common::demangle(attr.type().name())));
}
}

18 changes: 9 additions & 9 deletions paddle/fluid/framework/attribute.h
@@ -53,8 +53,8 @@ struct ExtractAttribute {
PADDLE_THROW(common::errors::InvalidArgument(
"Cannot get attribute (%s) by type %s, its type is %s.",
attr_name_,
- paddle::platform::demangle(typeid(T).name()),
- paddle::platform::demangle(attr.type().name())));
+ common::demangle(typeid(T).name()),
+ common::demangle(attr.type().name())));
}
return attr_value;
}
@@ -88,7 +88,7 @@ struct ExtractAttribute<bool> {
PADDLE_THROW(common::errors::InvalidArgument(
"Cannot get attribute (%s) by type bool, its type is %s.",
attr_name_,
- paddle::platform::demangle(attr.type().name())));
+ common::demangle(attr.type().name())));
}
return attr_value;
}
@@ -116,7 +116,7 @@ struct ExtractAttribute<int64_t> {
PADDLE_THROW(common::errors::InvalidArgument(
"Cannot get attribute (%s) by type int64_t, its type is %s.",
attr_name_,
- paddle::platform::demangle(attr.type().name())));
+ common::demangle(attr.type().name())));
}
return attr_value;
}
@@ -147,7 +147,7 @@ struct ExtractAttribute<std::vector<int64_t>> {
"Cannot get attribute (%s) by type std::vector<int64_t>, its type is "
"%s.",
attr_name_,
- paddle::platform::demangle(attr.type().name())));
+ common::demangle(attr.type().name())));
}
return attr_value;
}
@@ -175,7 +175,7 @@ struct ExtractAttribute<float> {
PADDLE_THROW(common::errors::InvalidArgument(
"Cannot get attribute (%s) by type float, its type is %s.",
attr_name_,
- paddle::platform::demangle(attr.type().name())));
+ common::demangle(attr.type().name())));
}
return attr_value;
}
@@ -206,7 +206,7 @@ struct ExtractAttribute<double> {
PADDLE_THROW(common::errors::InvalidArgument(
"Cannot get attribute (%s) by type double, its type is %s.",
attr_name_,
- paddle::platform::demangle(attr.type().name())));
+ common::demangle(attr.type().name())));
}
return attr_value;
}
@@ -237,7 +237,7 @@ struct ExtractAttribute<std::vector<double>> {
"Cannot get attribute (%s) by type std::vector<double>, its type is "
"%s.",
attr_name_,
- paddle::platform::demangle(attr.type().name())));
+ common::demangle(attr.type().name())));
}
return attr_value;
}
@@ -259,7 +259,7 @@ struct ExtractAttribute<paddle::experimental::Scalar> {
"Cannot get attribute (%s) by type Scalar, its type is %s, index is "
"%d",
attr_name_,
- paddle::platform::demangle(attr.type().name()),
+ common::demangle(attr.type().name()),
attr.index()));
}
return attr_value;
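All of the ExtractAttribute error paths above pass the mangled typeid(...).name() string through a demangle helper so the message can name a readable C++ type. A minimal standalone sketch of what such a helper does on Itanium-ABI compilers (GCC/Clang); the Demangle function here is illustrative, not Paddle's common::demangle:

```cpp
#include <cxxabi.h>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <typeinfo>
#include <vector>

// Turn a mangled type name into a readable one; fall back to the input on failure.
std::string Demangle(const char* mangled) {
  int status = 0;
  char* out = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
  std::string result = (status == 0 && out != nullptr) ? out : mangled;
  std::free(out);  // __cxa_demangle allocates with malloc
  return result;
}

int main() {
  // Prints something like "std::vector<long, std::allocator<long> >"
  // rather than the mangled "St6vectorIlSaIlEE".
  std::cout << Demangle(typeid(std::vector<int64_t>).name()) << "\n";
}
```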
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_type.cc
@@ -73,7 +73,7 @@ proto::VarType::Type ToDataType(std::type_index type) {
return it->second;
}
PADDLE_THROW(common::errors::Unimplemented(
"Not support %s as tensor data type.", platform::demangle(type.name())));
"Not support %s as tensor data type.", common::demangle(type.name())));
}

std::type_index ToTypeIndex(proto::VarType::Type type) {
6 changes: 3 additions & 3 deletions paddle/fluid/framework/details/eager_deletion_op_handle.cc
@@ -133,7 +133,7 @@ void EagerDeletionOpHandle::RunImpl() {
CallOnce();
}

- platform::RecordEvent record_event(
+ phi::RecordEvent record_event(
Name(), platform::TracerEventType::UserDefined, 2);
std::deque<std::shared_ptr<memory::Allocation>> garbages;
for (size_t i = 0; i < var_infos_.size(); ++i) {
@@ -154,8 +154,8 @@ void EagerDeletionOpHandle::RunImpl() {
garbages.emplace_back(var->GetMutable<phi::SelectedRows>()
->mutable_value()
->MoveMemoryHolder());
} else if (var->IsType<LoDTensorArray>()) {
auto *tensor_arr = var->GetMutable<LoDTensorArray>();
} else if (var->IsType<phi::TensorArray>()) {
auto *tensor_arr = var->GetMutable<phi::TensorArray>();
for (auto &t : *tensor_arr) {
garbages.emplace_back(t.MoveMemoryHolder());
}
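The MoveMemoryHolder loop above is the pattern this commit touches in every garbage-collection file: rather than freeing a tensor's buffer in place, each element of the array surrenders its allocation into a garbage queue whose lifetime the collector controls. A rough standalone sketch of the idea, with stand-in types rather than Paddle's real classes:

```cpp
#include <deque>
#include <memory>
#include <utility>
#include <vector>

// Stand-ins for Paddle's allocation and tensor types (illustrative only).
struct Allocation {};  // would own a host/device buffer

class Tensor {
 public:
  // Surrender ownership of the buffer; the tensor is left empty.
  std::shared_ptr<Allocation> MoveMemoryHolder() { return std::move(holder_); }

 private:
  std::shared_ptr<Allocation> holder_ = std::make_shared<Allocation>();
};

int main() {
  std::vector<Tensor> tensor_array(3);  // stands in for phi::TensorArray
  std::deque<std::shared_ptr<Allocation>> garbages;

  // The loop from the diff: move every element's buffer into the queue
  // instead of freeing it in place.
  for (auto& t : tensor_array) {
    garbages.emplace_back(t.MoveMemoryHolder());
  }
  // A collector would free `garbages` later, e.g. once pending device
  // work that may still read the buffers has completed.
}
```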
2 changes: 1 addition & 1 deletion paddle/fluid/framework/dlpack_tensor.cc
@@ -41,7 +41,7 @@ static ::DLDataType GetDLDataTypeCode() {
PADDLE_THROW(common::errors::Unavailable(
"Unsupported data type (%s), only supports float16, float, unsigned "
"int and int.",
- platform::demangle(typeid(T).name())));
+ common::demangle(typeid(T).name())));
}
dtype.bits = 8 * sizeof(T);
dtype.lanes = 1;
12 changes: 6 additions & 6 deletions paddle/fluid/framework/executor.cc
@@ -181,7 +181,7 @@ void Executor::Run(const ProgramDesc& pdesc,
bool force_disable_gc,
bool keep_kid_scopes) {
LOG_FIRST_N(INFO, 1) << "Old Executor is Running.";
- platform::RecordEvent record_run(
+ phi::RecordEvent record_run(
"Executor::Run", platform::TracerEventType::UserDefined, 1);
platform::RecordBlock b(block_id);
if (FLAGS_use_mkldnn) EnableMKLDNN(pdesc);
@@ -327,7 +327,7 @@ void Executor::Run(const ProgramDesc& program,
bool create_vars,
const std::string& feed_holder_name,
const std::string& fetch_holder_name) {
- platform::RecordEvent record_run(
+ phi::RecordEvent record_run(
"Executor::Run", platform::TracerEventType::UserDefined, 1);
platform::RecordBlock b(kProgramId);
if (FLAGS_use_mkldnn) EnableMKLDNN(program);
@@ -470,9 +470,9 @@ void Executor::RunPartialPreparedContext(ExecutorPrepareContext* ctx,
bool create_local_scope,
bool create_vars,
bool keep_kids) {
platform::RecordEvent record_run("Executor::RunPartialPreparedContext",
platform::TracerEventType::UserDefined,
1);
phi::RecordEvent record_run("Executor::RunPartialPreparedContext",
platform::TracerEventType::UserDefined,
1);
platform::RecordBlock b(kProgramId);
PADDLE_ENFORCE_NOT_NULL(
scope, common::errors::InvalidArgument("Scope shouldn't be null"));
@@ -494,7 +494,7 @@ void Executor::RunPartialPreparedContext(ExecutorPrepareContext* ctx,
auto& op = ctx->ops_[i];
op->Run(*local_scope, place_);
if (gc) {
- platform::RecordEvent record(
+ phi::RecordEvent record(
"CheckGC", platform::TracerEventType::UserDefined, 10);
DeleteUnusedTensors(*local_scope, op.get(), ctx->unused_vars_, gc.get());
}
4 changes: 2 additions & 2 deletions paddle/fluid/framework/executor_gc_helper.cc
@@ -193,8 +193,8 @@ void DeleteUnusedTensors(const Scope &scope,
garbages.emplace_back(var->GetMutable<phi::SelectedRows>()
->mutable_value()
->MoveMemoryHolder());
} else if (var->IsType<LoDTensorArray>()) {
auto *lod_tensor_arr = var->GetMutable<LoDTensorArray>();
} else if (var->IsType<phi::TensorArray>()) {
auto *lod_tensor_arr = var->GetMutable<phi::TensorArray>();
for (auto &t : *lod_tensor_arr) {
garbages.emplace_back(t.MoveMemoryHolder());
}
4 changes: 2 additions & 2 deletions paddle/fluid/framework/feed_fetch_type.h
@@ -34,7 +34,7 @@ struct PhiVectorType<FeedType> {
using FeedList = paddle::framework::PhiVector<FeedType>;

using FetchType = paddle::variant<phi::DenseTensor,
-                                  LoDTensorArray,
+                                  phi::TensorArray,
framework::Vocab,
phi::SparseCooTensor>;
using FetchList = std::vector<FetchType>;
@@ -50,7 +50,7 @@ inline bool data_is_lod_tensor(const FetchType &data) {
}

inline bool data_is_lod_tensor_array(const FetchType &data) {
- if (data.type() == typeid(LoDTensorArray)) {
+ if (data.type() == typeid(phi::TensorArray)) {
return true;
}
return false;
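FetchType is a variant, so data_is_lod_tensor_array is simply a test of which alternative is currently held. A standalone equivalent, with std::variant standing in for paddle::variant and simplified alternatives:

```cpp
#include <variant>

// Simplified stand-ins for the real alternatives in FetchType.
struct DenseTensor {};
struct TensorArray {};

using FetchType = std::variant<DenseTensor, TensorArray>;

// Mirrors data_is_lod_tensor_array: true when the variant currently
// holds the tensor-array alternative.
inline bool data_is_tensor_array(const FetchType& data) {
  return std::holds_alternative<TensorArray>(data);
}

int main() {
  FetchType f = TensorArray{};
  return data_is_tensor_array(f) ? 0 : 1;
}
```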
30 changes: 15 additions & 15 deletions paddle/fluid/framework/infershape_utils.cc
@@ -198,7 +198,7 @@ bool CompatMetaTensor::is_dense() const {
bool CompatMetaTensor::is_tensor_array() const {
if (is_runtime_) {
auto* var = PADDLE_GET_CONST(Variable*, var_);
return var->IsType<framework::LoDTensorArray>();
return var->IsType<phi::TensorArray>();
} else {
auto* var = PADDLE_GET_CONST(VarDesc*, var_);
return var->GetType() == proto::VarType::LOD_TENSOR_ARRAY;
@@ -215,9 +215,9 @@ DDim CompatMetaTensor::dims() const {
return var->Get<phi::SelectedRows>().GetCompleteDims();
} else if (var->IsType<phi::SparseCooTensor>()) {
return var->Get<phi::SparseCooTensor>().dims();
} else if (var->IsType<framework::LoDTensorArray>()) {
} else if (var->IsType<phi::TensorArray>()) {
// use tensor array size as dims
auto& tensor_array = var->Get<framework::LoDTensorArray>();
auto& tensor_array = var->Get<phi::TensorArray>();
return common::make_ddim({static_cast<int64_t>(tensor_array.size())});
} else {
PADDLE_THROW(common::errors::Unimplemented(
@@ -243,9 +243,9 @@ phi::DataType CompatMetaTensor::dtype() const {
return var->Get<phi::SelectedRows>().dtype();
} else if (var->IsType<phi::SparseCooTensor>()) {
return var->Get<phi::SparseCooTensor>().dtype();
} else if (var->IsType<framework::LoDTensorArray>()) {
} else if (var->IsType<phi::TensorArray>()) {
// NOTE(chenweihang): do nothing
- // Unsupported get dtype from LoDTensorArray now
+ // Unsupported get dtype from phi::TensorArray now
return phi::DataType::UNDEFINED;
} else {
PADDLE_THROW(common::errors::Unimplemented(
@@ -267,9 +267,9 @@ DataLayout CompatMetaTensor::layout() const {
return var->Get<phi::SelectedRows>().layout();
} else if (var->IsType<phi::SparseCooTensor>()) {
return var->Get<phi::SparseCooTensor>().layout();
} else if (var->IsType<framework::LoDTensorArray>()) {
} else if (var->IsType<phi::TensorArray>()) {
// NOTE(chenweihang): do nothing
- // Unsupported get layout from LoDTensorArray now
+ // Unsupported get layout from phi::TensorArray now
return phi::DataLayout::UNDEFINED;
} else {
PADDLE_THROW(common::errors::Unimplemented(
@@ -298,16 +298,16 @@ void CompatMetaTensor::set_dims(const DDim& dims) {
} else if (var->IsType<phi::SparseCooTensor>()) {
auto* tensor = var->GetMutable<phi::SparseCooTensor>();
phi::DenseTensorUtils::GetMutableMeta(tensor)->dims = dims;
} else if (var->IsType<framework::LoDTensorArray>()) {
auto* tensor_array = var->GetMutable<framework::LoDTensorArray>();
} else if (var->IsType<phi::TensorArray>()) {
auto* tensor_array = var->GetMutable<phi::TensorArray>();
// Note: Here I want enforce `tensor_array->size() == 0UL`, because
- // inplace using on LoDTensorArray is dangerous, but the unittest
+ // inplace using on phi::TensorArray is dangerous, but the unittest
// `test_list` contains this behavior
PADDLE_ENFORCE_EQ(dims.size(),
1UL,
common::errors::InvalidArgument(
"LoDTensorArray can only have one dimension."));
- // only set the array size for LoDTensorArray input
+ // only set the array size for phi::TensorArray input
tensor_array->resize(dims[0]);
} else {
PADDLE_THROW(common::errors::Unimplemented(
@@ -335,9 +335,9 @@ void CompatMetaTensor::set_dtype(phi::DataType dtype) {
} else if (var->IsType<phi::SparseCooTensor>()) {
auto* tensor = var->GetMutable<phi::SparseCooTensor>();
phi::DenseTensorUtils::GetMutableMeta(tensor)->dtype = dtype;
} else if (var->IsType<framework::LoDTensorArray>()) {
} else if (var->IsType<phi::TensorArray>()) {
// NOTE(chenweihang): do nothing
- // Unsupported set dtype for LoDTensorArray now
+ // Unsupported set dtype for phi::TensorArray now
} else {
PADDLE_THROW(common::errors::Unimplemented(
"Currently, only can set dtype from DenseTensor or SelectedRows."));
@@ -366,9 +366,9 @@ void CompatMetaTensor::set_layout(DataLayout layout) {
} else if (var->IsType<phi::SparseCooTensor>()) {
auto* tensor = var->GetMutable<phi::SparseCooTensor>();
phi::DenseTensorUtils::GetMutableMeta(tensor)->layout = layout;
} else if (var->IsType<framework::LoDTensorArray>()) {
} else if (var->IsType<phi::TensorArray>()) {
// NOTE(chenweihang): do nothing
- // Unsupported set dtype for LoDTensorArray now
+ // Unsupported set dtype for phi::TensorArray now
} else {
PADDLE_THROW(common::errors::Unimplemented(
"Currently, only can set layout from DenseTensor or "
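One convention in infershape_utils.cc above is easy to miss: a tensor array has no element shape at infer-shape time, so its "dims" is a single dimension holding the array length, and set_dims with a 1-D value resizes the array. A small standalone sketch of that convention (all names illustrative):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct DenseTensor {};
using TensorArray = std::vector<DenseTensor>;  // stand-in for phi::TensorArray

// The convention in CompatMetaTensor: a tensor array's "shape" is a single
// dimension holding its length.
int64_t DimsOf(const TensorArray& arr) { return static_cast<int64_t>(arr.size()); }

void SetDims(TensorArray* arr, const std::vector<int64_t>& dims) {
  assert(dims.size() == 1 && "tensor arrays can only have one dimension");
  arr->resize(static_cast<size_t>(dims[0]));
}

int main() {
  TensorArray arr;
  SetDims(&arr, {4});
  assert(DimsOf(arr) == 4);
}
```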
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/graph.h
@@ -160,8 +160,8 @@ class Graph {
PADDLE_THROW(common::errors::InvalidArgument(
"Invalid attribute type of %s, expected: %s, received: %s.",
attr_name,
- platform::demangle(typeid(AttrType *).name()), // NOLINT
- platform::demangle(attrs_.at(attr_name).type().name())));
+ common::demangle(typeid(AttrType *).name()), // NOLINT
+ common::demangle(attrs_.at(attr_name).type().name())));
}
}

@@ -75,9 +75,9 @@ static int64_t GetMemorySize(
}

// Split all variables in the graph into phi::DenseTensor and
- // Non-phi::DenseTensor (e.g. SelectedRows, LoDTensorArray) Since partial GC is
- // based on static analysis of memory size of each variable So we should skip
- // SelectedRows and LoDTensorArray here
+ // Non-phi::DenseTensor (e.g. SelectedRows, phi::TensorArray) Since partial GC
+ // is based on static analysis of memory size of each variable So we should skip
+ // SelectedRows and phi::TensorArray here
static void SplitIntoLoDTensorAndNonLoDTensorVars(
const OpToVarNameSetMap &m,
const details::GraphVars &vars,
@@ -186,7 +186,7 @@ static OpToVarNameSetMap ShrinkGCVars(const OpToVarNameSetMap &m,
}

/**
- * Step 4: Combine other vars (SelectedRows, LoDTensorArray)
+ * Step 4: Combine other vars (SelectedRows, phi::TensorArray)
*/
if (!delete_lod_tensor_only) {
for (auto &op_vars_pair : other_vars) {
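The comments above state the constraint behind this pass: partial GC ranks variables by statically known memory size, which only dense tensors have, so SelectedRows and tensor arrays are grouped separately and collected unconditionally. A toy sketch of that split (types and names invented for illustration):

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <vector>

enum class VarKind { kDenseTensor, kSelectedRows, kTensorArray };

struct VarInfo {
  VarKind kind;
  int64_t static_bytes;  // only meaningful for dense tensors
};

// Mirrors the intent of SplitIntoLoDTensorAndNonLoDTensorVars: dense tensors
// can be ranked by size for partial GC; everything else is handled separately.
void SplitVars(const std::map<std::string, VarInfo>& vars,
               std::vector<std::string>* dense,
               std::vector<std::string>* other) {
  for (const auto& [name, info] : vars) {
    (info.kind == VarKind::kDenseTensor ? dense : other)->push_back(name);
  }
}

int main() {
  std::map<std::string, VarInfo> vars{
      {"x", {VarKind::kDenseTensor, 4096}},
      {"ids", {VarKind::kSelectedRows, -1}},
      {"arr", {VarKind::kTensorArray, -1}}};
  std::vector<std::string> dense, other;
  SplitVars(vars, &dense, &other);  // dense = {"x"}, other = {"arr", "ids"}
}
```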
@@ -131,8 +131,8 @@ void InterpreterCoreEventGarbageCollector::Add(Variable* var,
->MoveMemoryHolder(),
event,
ctx);
} else if (var->IsType<LoDTensorArray>()) {
auto* tensor_arr = var->GetMutable<LoDTensorArray>();
} else if (var->IsType<phi::TensorArray>()) {
auto* tensor_arr = var->GetMutable<phi::TensorArray>();
for (auto& t : *tensor_arr) {
Add(t.MoveMemoryHolder(), event, ctx);
}
@@ -48,8 +48,8 @@ void InterpreterCoreFastGarbageCollector::Add(Variable* var) {
->mutable_value()
->MoveMemoryHolder());
var->GetMutable<phi::SelectedRows>()->mutable_rows()->clear();
} else if (var->IsType<LoDTensorArray>()) {
auto* tensor_arr = var->GetMutable<LoDTensorArray>();
} else if (var->IsType<phi::TensorArray>()) {
auto* tensor_arr = var->GetMutable<phi::TensorArray>();
for (auto& t : *tensor_arr) {
Add(t.MoveMemoryHolder());
}
@@ -91,8 +91,8 @@ void InterpreterCoreNoEventGarbageCollector::Add(
var->GetMutable<phi::SparseCsrTensor>()->mutable_cols()->clear();
var->GetMutable<phi::SparseCsrTensor>()->mutable_crows()->clear();
var->GetMutable<phi::SparseCsrTensor>()->mutable_values()->clear();
} else if (var->IsType<LoDTensorArray>()) {
auto* tensor_arr = var->GetMutable<LoDTensorArray>();
} else if (var->IsType<phi::TensorArray>()) {
auto* tensor_arr = var->GetMutable<phi::TensorArray>();
for (auto& t : *tensor_arr) {
Add(t.MoveMemoryHolder(), ctx);
}
@@ -219,7 +219,7 @@ const phi::DeviceContext& InstructionBase::DeviceContext() const {
}

void InstructionBase::RecordEvent(const Place& place) const {
- platform::RecordEvent record(
+ phi::RecordEvent record(
"RecordStreamEvent", platform::TracerEventType::UserDefined, 10);
if (event_to_record_) {
VLOG(6) << "Record event at instruction: " << id_;
@@ -233,7 +233,7 @@ void InstructionBase::WaitEvent(const Place& place) const {
return;
}
for (const EventInter& event_iter : events_to_wait_) {
- platform::RecordEvent record(
+ phi::RecordEvent record(
"WaitStreamEvent", platform::TracerEventType::UserDefined, 10);
VLOG(6) << "Wait instruction: " << event_iter.instr_id_
<< " 's event with waiter_type: " << event_iter.waiter_type_;
@@ -181,9 +181,9 @@ PhiKernelInstruction::~PhiKernelInstruction() { delete phi_kernel_; }
void PhiKernelInstruction::Run() {
VLOG(6) << "Begin run op " << phi_op_name_ << " infer meta.";
if (infer_meta_interface_) {
platform::RecordEvent record_event("PhiKernelInstruction::infermeta",
platform::TracerEventType::UserDefined,
1);
phi::RecordEvent record_event("PhiKernelInstruction::infermeta",
platform::TracerEventType::UserDefined,
1);
infer_meta_interface_->infer_meta_(&(infer_meta_context_));
}
VLOG(6) << "End run op " << phi_op_name_ << " infer meta.";
@@ -192,9 +192,9 @@ void PhiKernelInstruction::Run() {
}
VLOG(6) << "Begin run op " << phi_op_name_ << " kernel.";
{
platform::RecordEvent record_event("PhiKernelInstruction::kernel launch",
platform::TracerEventType::UserDefined,
1);
phi::RecordEvent record_event("PhiKernelInstruction::kernel launch",
platform::TracerEventType::UserDefined,
1);
(*(phi_kernel_))(&(kernel_context_));
}

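phi::RecordEvent, like the platform::RecordEvent it replaces throughout this commit, is used as an RAII scope: profiling starts at construction and stops when the object is destroyed at the end of the block. A generic standalone sketch of the idiom (ScopedEvent is illustrative, not the phi API):

```cpp
#include <chrono>
#include <iostream>
#include <string>
#include <utility>

// RAII scope timer: records from construction to destruction,
// the same usage shape as the RecordEvent objects in the diff above.
class ScopedEvent {
 public:
  explicit ScopedEvent(std::string name)
      : name_(std::move(name)), start_(std::chrono::steady_clock::now()) {}
  ~ScopedEvent() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_).count();
    std::cout << name_ << " took " << us << " us\n";
  }

 private:
  std::string name_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  {
    ScopedEvent record_event("kernel launch");
    // ... work being measured ...
  }  // event ends here, when record_event is destroyed
}
```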