Fix typos(oprations, etc) (#62043)
co63oc authored Feb 26, 2024
1 parent 67bd48f commit 5998080
Showing 9 changed files with 15 additions and 15 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fusion_group/operation.cc
@@ -152,7 +152,7 @@ void OperationMap::InsertUnaryElementwiseOperations() {
 }
 
 void OperationMap::InsertBinaryElementwiseOperations() {
-  // For binary elementwise oprations:
+  // For binary elementwise operations:
   // ${0} - x
   // ${1} - y
   // ${2} - out
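The `${i}` placeholders in the comment above are positional slots in the operation's expression template: `${0}` and `${1}` name the inputs, `${2}` the output. A minimal sketch of that substitution, with a hypothetical `Substitute` helper and an illustrative template (not the actual OperationMap API):

#include <string>
#include <vector>

// Expand each ${i} in an expression template with the i-th argument
// name, e.g. Substitute("${2} = ${0} + ${1}", {"x", "y", "out"})
// yields "out = x + y".
std::string Substitute(std::string expr,
                       const std::vector<std::string>& args) {
  for (size_t i = 0; i < args.size(); ++i) {
    const std::string token = "${" + std::to_string(i) + "}";
    size_t pos = 0;
    while ((pos = expr.find(token, pos)) != std::string::npos) {
      expr.replace(pos, token.size(), args[i]);
      pos += args[i].size();
    }
  }
  return expr;
}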
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fusion_group/operation.h
@@ -55,7 +55,7 @@ struct Operation {
       return false;
     }
     if (IsGradOp() && exprs.size() != static_cast<size_t>(num_operands)) {
-      // When it is a backward opertion, it should hold a expression for each
+      // When it is a backward operation, it should hold an expression for each
       // operand.
       return false;
     }
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fusion_group/subgraph.h
@@ -150,7 +150,7 @@ class SubGraph {
           !IsInputOfExternalOp(n)) {
         // When the outputs size is 0, it is also considered a intermidiate
         // output. It maybe an unused output or the fetching vars, so that we
-        // cannot eleiminate it directly here.
+        // cannot eliminate it directly here.
         intermediate_out_vars.push_back(n);
       }
     }
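The comment in this hunk captures a subtle point: a var with no consumers at all is still treated as an intermediate output, because it may be fetched by the user rather than truly dead. A standalone restatement of the classification under that reading (hypothetical `VarNode` view, not SubGraph's real node type):

#include <vector>

// Hypothetical minimal view of a var node, for illustration only.
struct VarNode {
  bool produced_inside;                // defined by an op in the subgraph
  std::vector<bool> consumers_inside;  // one flag per consuming op
};

// A var produced inside the subgraph is an intermediate output when no
// external op reads it. The zero-consumer case also lands here: such a
// var may be unused or may be a fetched var, so it cannot simply be
// eliminated.
bool IsIntermediateOut(const VarNode& n) {
  if (!n.produced_inside) return false;
  for (bool inside : n.consumers_inside) {
    if (!inside) return false;  // read by an external op
  }
  return true;
}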
10 changes: 5 additions & 5 deletions paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc
@@ -100,8 +100,8 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
     // bool is_optimizer = is_optimizer_op(op_namescope);
     bool is_regularization = is_regularization_op(op_namescope);
 
-    VLOG(10) << "found optimizer releated op: " << op_type;
-    // initial larning_rate will be set in ipu_backend
+    VLOG(10) << "found optimizer related op: " << op_type;
+    // initial learning_rate will be set in ipu_backend
     set_ops.insert(op_type);
     if (op_type == "sgd") {
       auto type = std::string{"sgd"};
@@ -267,10 +267,10 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
         auto value = PADDLE_GET_CONST(float, op->GetAttr("value"));
         new_op.SetAttr("clip_norm", value);
       } else if (ignored_ops.count(op_type)) {
-        VLOG(10) << "Ignore optimizer releated op: " << op_type;
+        VLOG(10) << "Ignore optimizer related op: " << op_type;
       } else {
         PADDLE_THROW(platform::errors::InvalidArgument(
-            "Unknown optimizer releated op_type: %s", op_type));
+            "Unknown optimizer related op_type: %s", op_type));
       }
     } else if (op_role == OpRole::kLoss) {
       VLOG(10) << "found loss op type: " << op->Type();
@@ -312,7 +312,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const {
         new_op.SetAttr("weight_decay_mode", std::string{"l2_regularization"});
       }
     } else {
-      VLOG(10) << "No weight deacy setting found";
+      VLOG(10) << "No weight decay setting found";
     }
 
     // setup grad clip
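The hunks above all sit in one long dispatch over optimizer-related op types: recognized ops are translated into attributes on a new op, explicitly ignored ops are only logged, and any other op type is a hard error. A condensed sketch of that control flow (illustrative op names and table contents, not the pass's real ones):

#include <set>
#include <stdexcept>
#include <string>

void HandleOptimizerRelatedOp(const std::string& op_type) {
  // Illustrative subset; the real pass keeps a much larger table.
  static const std::set<std::string> ignored_ops = {"sum", "scale"};
  if (op_type == "sgd") {
    // ... record an sgd optimizer config for ipu_backend ...
  } else if (ignored_ops.count(op_type)) {
    // Logged and skipped, mirroring the VLOG(10) branch in the hunk.
  } else {
    throw std::invalid_argument("Unknown optimizer related op_type: " +
                                op_type);
  }
}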
@@ -245,7 +245,7 @@ void BufferSharedCrossOpMemoryReusePass::RunOnScopeIdx(size_t idx) const {
     }
   }
 
-  // After all output args have been transversed, we should check whether
+  // After all output args have been traversed, we should check whether
   // there is new unlived var after `op` runs.
   for (auto op_iter = var_to_ops.begin(); op_iter != var_to_ops.end();) {
     // erase op from `var_to_ops` first
@@ -355,7 +355,7 @@ void BufferSharedCrossOpMemoryReusePass::BuildOpDependencyMap() const {
   // BFS to fill `preceding_ops`
   graph_view.BreadthFirstVisit([&](OpHandleBase *cur_op) {
     // All preceding ops of cur_op should be:
-    //  - preceding ops of cur_op, that is connected to cur_op directely
+    //  - preceding ops of cur_op, that is connected to cur_op directly
     //  - all preceding ops of `direct preceding ops of cur_op`
     auto &all_preceding_ops_of_cur_op = preceding_ops[cur_op];
     for (auto &preceding_op : graph_view.PrecedingOps(cur_op)) {
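The two-bullet comment in the second hunk is the invariant of a transitive-closure construction: once a node's direct predecessors have complete sets, its own set is their union plus the direct predecessors themselves. A self-contained sketch with integer node ids, assuming the visit order places every node after all of its predecessors (which the pass relies on its graph traversal to provide; an arbitrary BFS order alone would not guarantee it):

#include <map>
#include <set>
#include <vector>

// For each node id, collect all transitive predecessors. `order` must
// list every node after all of its predecessors (a topological order).
std::map<int, std::set<int>> AllPrecedingNodes(
    const std::vector<int>& order,
    const std::map<int, std::vector<int>>& direct_preds) {
  std::map<int, std::set<int>> preceding;
  for (int cur : order) {
    std::set<int>& all_preds = preceding[cur];
    auto it = direct_preds.find(cur);
    if (it == direct_preds.end()) continue;  // a source node
    for (int p : it->second) {
      all_preds.insert(p);                     // the direct predecessor
      const std::set<int>& pp = preceding[p];  // already complete
      all_preds.insert(pp.begin(), pp.end());  // and its predecessors
    }
  }
  return preceding;
}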
@@ -89,7 +89,7 @@ class MemOptVarInfo {
    * scheduled in many threads inside ParallelExecutor, runtime_ref_cnt_
    * must be an atomic integer to guarantee the thread safety and visibility.
    *
-   * Speciallly, if ref_cnt_ is 1, we do not need to reset runtime_ref_cnt_
+   * Specially, if ref_cnt_ is 1, we do not need to reset runtime_ref_cnt_
    * after iteration ends.
    */
   size_t ref_cnt_;
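The comment documents a two-counter scheme: a fixed consumer count plus an atomic runtime counter decremented as ops finish. A minimal sketch of the pattern, including why the single-consumer case needs no reset (simplified names; not the actual MemOptVarInfo interface):

#include <atomic>
#include <cstddef>

class VarRefCount {  // simplified sketch, not MemOptVarInfo itself
 public:
  explicit VarRefCount(size_t ref_cnt)
      : ref_cnt_(ref_cnt), runtime_ref_cnt_(ref_cnt) {}

  // Called by each consuming op; returns true when the var is no
  // longer needed this iteration. With exactly one consumer the
  // atomic is never touched, which is why it never needs resetting.
  bool DecreaseRefCnt() {
    return ref_cnt_ == 1 || runtime_ref_cnt_.fetch_sub(1) == 1;
  }

  // Called after an iteration ends; a no-op in the single-consumer case.
  void ResetRuntimeRefCnt() {
    if (ref_cnt_ != 1) runtime_ref_cnt_ = ref_cnt_;
  }

 private:
  size_t ref_cnt_;                       // static consumer count
  std::atomic<size_t> runtime_ref_cnt_;  // decremented concurrently
};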
@@ -111,7 +111,7 @@ static void TakeVarInfoFromMainGraph(
 }
 
 // This pass will be applied on both the main graph and all cinn subgraphs,
-// and it distinguishs them according to whether the graph has the
+// and it distinguishes them according to whether the graph has the
 // kMemOptVarInfoFromMainGraph attribute or not.
 // On the main graph, it finds all cinn_launch ops and shares MemOptVarInfos
 // to their subgraphs.
@@ -55,7 +55,7 @@ class WhileOpEagerDeletionPass : public ir::Pass {
       }
     }
     if (graph->IsConstructedByPartialProgram()) {
-      VLOG(4) << "Is Paritial Program";
+      VLOG(4) << "Is Partial Program";
       PADDLE_ENFORCE_LE(
           target_ops.size(),
           1,
@@ -1021,7 +1021,7 @@ TEST(CpuQuantizeSquashPass, fc_dequant_more_than_one_op_after_dequant) {
 //  b->Quantize1(Scale)->c
 //  c->Fc1
 //  c->Fc2
-TEST(CpuQuantizeSquashPass, quatize_with_same_scale) {
+TEST(CpuQuantizeSquashPass, quantize_with_same_scale) {
   auto first_scale = 1.2345f;
   auto second_scale = 1.2345f;
   auto use_mkldnn = true;
@@ -1033,7 +1033,7 @@ TEST(CpuQuantizeSquashPass, quatize_with_same_scale) {
 }
 
 // if scales are not the same, do not fuse
-TEST(CpuQuantizeSquashPass, quatize_with_different_scale) {
+TEST(CpuQuantizeSquashPass, quantize_with_different_scale) {
   auto first_scale = 1.2345f;
   auto second_scale = 1.5432f;
   auto use_mkldnn = true;
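Together the two renamed tests pin down the squash rule: two quantize ops fed by the same producer may be merged only when their scales are equal; with different scales the pass must leave both in place. The decision itself reduces to a scale comparison (standalone sketch, not the pass's real helper):

#include <cmath>

// Two quantize ops sharing an input can be squashed into one only if
// they use (numerically) the same scale.
bool CanSquashQuantizeOps(float first_scale, float second_scale) {
  return std::fabs(first_scale - second_scale) <= 1e-6f;
}

// CanSquashQuantizeOps(1.2345f, 1.2345f) -> true   (fuse)
// CanSquashQuantizeOps(1.2345f, 1.5432f) -> false  (keep both)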
