diff --git a/paddle/fluid/framework/ir/fusion_group/operation.cc b/paddle/fluid/framework/ir/fusion_group/operation.cc index 908aa6d62b6f7..75b0d8d631f8a 100644 --- a/paddle/fluid/framework/ir/fusion_group/operation.cc +++ b/paddle/fluid/framework/ir/fusion_group/operation.cc @@ -152,7 +152,7 @@ void OperationMap::InsertUnaryElementwiseOperations() { } void OperationMap::InsertBinaryElementwiseOperations() { - // For binary elementwise oprations: + // For binary elementwise operations: // ${0} - x // ${1} - y // ${2} - out diff --git a/paddle/fluid/framework/ir/fusion_group/operation.h b/paddle/fluid/framework/ir/fusion_group/operation.h index 3edf2f598525a..dd939cd3cbbf1 100644 --- a/paddle/fluid/framework/ir/fusion_group/operation.h +++ b/paddle/fluid/framework/ir/fusion_group/operation.h @@ -55,7 +55,7 @@ struct Operation { return false; } if (IsGradOp() && exprs.size() != static_cast(num_operands)) { - // When it is a backward opertion, it should hold a expression for each + // When it is a backward operation, it should hold an expression for each // operand. return false; } diff --git a/paddle/fluid/framework/ir/fusion_group/subgraph.h b/paddle/fluid/framework/ir/fusion_group/subgraph.h index 057fc7efffb30..97caa43249002 100644 --- a/paddle/fluid/framework/ir/fusion_group/subgraph.h +++ b/paddle/fluid/framework/ir/fusion_group/subgraph.h @@ -150,7 +150,7 @@ class SubGraph { !IsInputOfExternalOp(n)) { // When the outputs size is 0, it is also considered a intermidiate // output. It maybe an unused output or the fetching vars, so that we // cannot eleiminate it directly here. + // cannot eliminate it directly here. 
intermediate_out_vars.push_back(n); } } diff --git a/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc b/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc index 284d144bf7534..d09519dfa5b04 100644 --- a/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc +++ b/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc @@ -100,8 +100,8 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { // bool is_optimizer = is_optimizer_op(op_namescope); bool is_regularization = is_regularization_op(op_namescope); - VLOG(10) << "found optimizer releated op: " << op_type; - // initial larning_rate will be set in ipu_backend + VLOG(10) << "found optimizer related op: " << op_type; + // initial learning_rate will be set in ipu_backend set_ops.insert(op_type); if (op_type == "sgd") { auto type = std::string{"sgd"}; @@ -267,10 +267,10 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { auto value = PADDLE_GET_CONST(float, op->GetAttr("value")); new_op.SetAttr("clip_norm", value); } else if (ignored_ops.count(op_type)) { - VLOG(10) << "Ignore optimizer releated op: " << op_type; + VLOG(10) << "Ignore optimizer related op: " << op_type; } else { PADDLE_THROW(platform::errors::InvalidArgument( - "Unknown optimizer releated op_type: %s", op_type)); + "Unknown optimizer related op_type: %s", op_type)); } } else if (op_role == OpRole::kLoss) { VLOG(10) << "found loss op type: " << op->Type(); @@ -312,7 +312,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { new_op.SetAttr("weight_decay_mode", std::string{"l2_regularization"}); } } else { - VLOG(10) << "No weight deacy setting found"; + VLOG(10) << "No weight decay setting found"; } // setup grad clip diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc index 2656580228049..b41b76c99aff6 100644 --- 
a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc @@ -245,7 +245,7 @@ void BufferSharedCrossOpMemoryReusePass::RunOnScopeIdx(size_t idx) const { } } - // After all output args have been transversed, we should check whether + // After all output args have been traversed, we should check whether // there is new unlived var after `op` runs. for (auto op_iter = var_to_ops.begin(); op_iter != var_to_ops.end();) { // erase op from `var_to_ops` first @@ -355,7 +355,7 @@ void BufferSharedCrossOpMemoryReusePass::BuildOpDependencyMap() const { // BFS to fill `preceding_ops` graph_view.BreadthFirstVisit([&](OpHandleBase *cur_op) { // All preceding ops of cur_op should be: - // - preceding ops of cur_op, that is connected to cur_op directely + // - preceding ops of cur_op, that is connected to cur_op directly // - all preceding ops of `direct preceding ops of cur_op` auto &all_preceding_ops_of_cur_op = preceding_ops[cur_op]; for (auto &preceding_op : graph_view.PrecedingOps(cur_op)) { diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h index 2980fa4e34a81..38238d8c7c307 100644 --- a/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h +++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h @@ -89,7 +89,7 @@ class MemOptVarInfo { * scheduled in many threads inside ParallelExecutor, runtime_ref_cnt_ * must be an atomic integer to guarantee the thread safety and visibility. * - * Speciallly, if ref_cnt_ is 1, we do not need to reset runtime_ref_cnt_ + * Specifically, if ref_cnt_ is 1, we do not need to reset runtime_ref_cnt_ * after iteration ends. 
*/ size_t ref_cnt_; diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/share_varinfo_into_cinn_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/share_varinfo_into_cinn_pass.cc index 2bc3d839af549..d9ea00e3935cc 100644 --- a/paddle/fluid/framework/ir/memory_optimize_pass/share_varinfo_into_cinn_pass.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/share_varinfo_into_cinn_pass.cc @@ -111,7 +111,7 @@ static void TakeVarInfoFromMainGraph( } // This pass will be applied on both the main graph and all cinn subgraphs, -// and it distinguishs them according to whether the graph has the +// and it distinguishes them according to whether the graph has the // kMemOptVarInfoFromMainGraph attribute or not. // On the main graph, it finds all cinn_launch ops and shares MemOptVarInfos // to their subgraphs. diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/while_op_eager_deletion_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/while_op_eager_deletion_pass.cc index 42f395da7c8a8..2d26587fdb24f 100644 --- a/paddle/fluid/framework/ir/memory_optimize_pass/while_op_eager_deletion_pass.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/while_op_eager_deletion_pass.cc @@ -55,7 +55,7 @@ class WhileOpEagerDeletionPass : public ir::Pass { } } if (graph->IsConstructedByPartialProgram()) { - VLOG(4) << "Is Paritial Program"; + VLOG(4) << "Is Partial Program"; PADDLE_ENFORCE_LE( target_ops.size(), 1, diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc index 90ed3009749ad..d2c6d981c3a2e 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc @@ -1021,7 +1021,7 @@ TEST(CpuQuantizeSquashPass, fc_dequant_more_than_one_op_after_dequant) { // b->Quantize1(Scale)->c // c->Fc1 // c->Fc2 -TEST(CpuQuantizeSquashPass, quatize_with_same_scale) { 
+TEST(CpuQuantizeSquashPass, quantize_with_same_scale) { auto first_scale = 1.2345f; auto second_scale = 1.2345f; auto use_mkldnn = true; @@ -1033,7 +1033,7 @@ TEST(CpuQuantizeSquashPass, quatize_with_same_scale) { } // if scales are not the same, do not fuse -TEST(CpuQuantizeSquashPass, quatize_with_different_scale) { +TEST(CpuQuantizeSquashPass, quantize_with_different_scale) { auto first_scale = 1.2345f; auto second_scale = 1.5432f; auto use_mkldnn = true;