[clang-tidy] NO.10 enable cppcoreguidelines-init-variables (PaddlePaddle#57871)

* [clang-tidy] enable cppcoreguidelines-init-variables check

* fix
gouzil authored and Frida-a committed Oct 14, 2023
1 parent b44442f commit 24fc214
Showing 118 changed files with 278 additions and 275 deletions.
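
For context, a minimal sketch (illustrative only, not code from this commit) of what cppcoreguidelines-init-variables reports and the kind of fix applied throughout the files below:

#include <cstdint>

// clang-tidy warns on locals declared without an initializer, e.g.
// "variable 'total' is not initialized [cppcoreguidelines-init-variables]".
int64_t SumPositive(const int64_t* values, int n) {
  // Before: int64_t total;   (indeterminate until the first assignment)
  int64_t total = 0;  // after: every local starts with a deterministic value
  for (int i = 0; i < n; ++i) {
    if (values[i] > 0) total += values[i];
  }
  return total;
}
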
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -155,7 +155,7 @@ cppcoreguidelines-avoid-c-arrays,
-cppcoreguidelines-avoid-goto,
cppcoreguidelines-c-copy-assignment-signature,
cppcoreguidelines-explicit-virtual-functions,
- -cppcoreguidelines-init-variables,
+ cppcoreguidelines-init-variables,
cppcoreguidelines-narrowing-conversions,
cppcoreguidelines-no-malloc,
-cppcoreguidelines-pro-type-const-cast,
@@ -57,7 +57,7 @@ std::unordered_map<std::string, int64_t> ShardingMergeForTensors(
const bool merge_conflicts) {
std::unordered_map<std::string, int64_t> axis_to_dim_map;
std::unordered_map<int64_t, std::string> dim_to_axis_map;
- int64_t merge_dim;
+ int64_t merge_dim = 0;

for (auto& pair : tensor_axes_to_dim_pairs) {
for (size_t i = 0; i < pair.second.size(); ++i) {
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/fleet_executor/dist_model.cc
@@ -47,7 +47,7 @@ bool LoadDataFromDistModelTensor(const DistModelTensor &input_data,
const platform::Place &place) {
VLOG(3) << "Loading data from DistModelTensor for " << input_data.name;
framework::DDim dims = phi::make_ddim(input_data.shape);
- void *input_tensor_ptr;
+ void *input_tensor_ptr = nullptr;
if (input_data.dtype == DistModelDataType::INT64) {
input_tensor_ptr = input_tensor->mutable_data<int64_t>(dims, place);
} else if (input_data.dtype == DistModelDataType::FLOAT32) {
6 changes: 3 additions & 3 deletions paddle/fluid/framework/data_feed.cc
@@ -2448,9 +2448,9 @@ bool SlotRecordInMemoryDataFeed::ParseOneInstance(const std::string& line,
}
// parse_logkey
std::string log_key = std::string(str + pos, len);
- uint64_t search_id;
- uint32_t cmatch;
- uint32_t rank;
+ uint64_t search_id = 0;
+ uint32_t cmatch = 0;
+ uint32_t rank = 0;
parser_log_key(log_key, &search_id, &cmatch, &rank);

rec->ins_id_ = log_key;
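
The search_id/cmatch/rank change above is the out-parameter case that recurs in many of these files: the locals are filled in through pointers, but now carry a defined default in case the callee returns early. A minimal sketch, with a hypothetical ParseLogKey standing in for parser_log_key:

#include <cstdint>
#include <string>

// Hypothetical stand-in for parser_log_key(); real parsing omitted.
void ParseLogKey(const std::string& key, uint64_t* search_id, uint32_t* cmatch,
                 uint32_t* rank) {
  if (key.empty()) return;  // early return: out-parameters are left untouched
  *search_id = 42;
  *cmatch = 1;
  *rank = 0;
}

void Example(const std::string& log_key) {
  // Default-initialize even though the parser normally writes these, so an
  // early return in the callee cannot leave them indeterminate.
  uint64_t search_id = 0;
  uint32_t cmatch = 0;
  uint32_t rank = 0;
  ParseLogKey(log_key, &search_id, &cmatch, &rank);
}
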
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/gather_op_handle.cc
@@ -45,7 +45,7 @@ void GatherOpHandle::RunImpl() {
in_var_handles.size(),
places_.size()));

- VarHandle *out_var_handle;
+ VarHandle *out_var_handle = nullptr;
{
auto out_var_handles = DynamicCast<VarHandle>(this->Outputs());
PADDLE_ENFORCE_EQ(
4 changes: 2 additions & 2 deletions paddle/fluid/framework/details/multi_devices_helper.cc
@@ -176,7 +176,7 @@ static bool IsDataParallelInferenceGraphImpl(
}

bool IsDataParallelInferenceGraph(const ir::Graph &graph) {
- size_t place_num;
+ size_t place_num = 0;
std::unordered_map<details::OpHandleBase *, size_t> op_to_dev_idx;
return IsDataParallelInferenceGraphImpl(graph, &op_to_dev_idx, &place_num);
}
@@ -196,7 +196,7 @@ bool IsDataParallelInferenceGraph(const ir::Graph &graph) {
*/
std::vector<std::unique_ptr<ir::Graph>> TrySeparateToMultipleSingleDeviceGraphs(
ir::Graph *graph) {
- size_t place_num;
+ size_t place_num = 0;
std::unordered_map<details::OpHandleBase *, size_t> op_to_dev_idx;
if (!IsDataParallelInferenceGraphImpl(*graph, &op_to_dev_idx, &place_num)) {
return {};
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/reduce_op_handle.cc
@@ -63,7 +63,7 @@ void ReduceOpHandle::RunImpl() {
in_var_handles.size(),
places_.size()));

- VarHandle *out_var_handle;
+ VarHandle *out_var_handle = nullptr;
{
auto out_var_handles = DynamicCast<VarHandle>(outputs_);

@@ -128,7 +128,7 @@ inline FetchResultType ThreadedSSAGraphExecutor::RunImpl(
run_all_ops(ready_ops);

// 2. Find ready variable
- bool timeout;
+ bool timeout = false;
auto cur_ready_vars = ready_vars->PopAll(1, &timeout);
if (timeout) {
for (auto &run_op_future : run_op_futures_) {
4 changes: 2 additions & 2 deletions paddle/fluid/framework/downpour_worker.cc
@@ -485,7 +485,7 @@ void DownpourWorker::TrainFilesWithProfiler() {
double push_sparse_time = 0.0;
double push_dense_time = 0.0;
double copy_table_time = 0.0;
- int cur_batch;
+ int cur_batch = 0;
int batch_cnt = 0;
uint64_t total_inst = 0;
timeline.Start();
@@ -804,7 +804,7 @@ void DownpourWorker::TrainFiles() {
platform::SetNumThreads(1);
device_reader_->Start();
int batch_cnt = 0;
- int cur_batch;
+ int cur_batch = 0;
while ((cur_batch = device_reader_->Next()) > 0) {
if (copy_table_config_.need_copy()) {
if (batch_cnt % copy_table_config_.batch_num() == 0) {
2 changes: 1 addition & 1 deletion paddle/fluid/framework/downpour_worker_opt.cc
@@ -307,7 +307,7 @@ void DownpourWorkerOpt::TrainFiles() {
platform::SetNumThreads(1);
device_reader_->Start();
int batch_cnt = 0;
- int cur_batch;
+ int cur_batch = 0;
std::future<int32_t> pull_async_status;
std::string async_wait_name = "";
for (int i = 0; i < param_.program_config(0).pull_sparse_table_id_size();
6 changes: 3 additions & 3 deletions paddle/fluid/framework/hogwild_worker.cc
@@ -239,7 +239,7 @@ void HogwildWorker::TrainFilesWithProfiler() {
platform::Timer timeline;
double total_time = 0.0;
double read_time = 0.0;
- int cur_batch;
+ int cur_batch = 0;
int batch_cnt = 0;
if (thread_id_ == 0) {
quit_flag_.store(false);
@@ -372,7 +372,7 @@ void HogwildWorker::TrainFiles() {
int total_batch_num = 0;
// how to accumulate fetched values here
device_reader_->Start();
- int cur_batch;
+ int cur_batch = 0;
int batch_cnt = 0;
if (thread_id_ == 0) {
quit_flag_.store(false);
@@ -471,7 +471,7 @@ void HogwildWorker::PrintFetchVars() {
}

if (thread_id_ == 0 && batch_num_ % batch_per_print == 0) {
- time_t curtime;
+ time_t curtime = 0;
time(&curtime);
std::array<char, 80> mbstr;
std::strftime(mbstr.data(),
4 changes: 2 additions & 2 deletions paddle/fluid/framework/io/crypto/cipher.cc
@@ -24,8 +24,8 @@ namespace framework {
std::shared_ptr<Cipher> CipherFactory::CreateCipher(
const std::string& config_file) {
std::string cipher_name;
- int iv_size;
- int tag_size;
+ int iv_size = 0;
+ int tag_size = 0;
std::unordered_map<std::string, std::string> config;
if (!config_file.empty()) {
config = CipherUtils::LoadConfig(config_file);
2 changes: 1 addition & 1 deletion paddle/fluid/framework/io/crypto/cipher_utils.cc
@@ -72,7 +72,7 @@ std::unordered_map<std::string, std::string> CipherUtils::LoadConfig(
"make sure input filename is available.",
config_file));
std::unordered_map<std::string, std::string> ret;
- char c;
+ char c = 0;
std::string line;
std::istringstream iss;
while (std::getline(fin, line)) {
8 changes: 4 additions & 4 deletions paddle/fluid/framework/io/crypto/cipher_utils_test.cc
@@ -46,19 +46,19 @@ TEST(CipherUtils, load_config) {
EXPECT_TRUE(CipherUtils::GetValue<std::string>(config, "key_str", &out_str));
EXPECT_EQ(out_str, std::string("ciphername"));

- int out_int;
+ int out_int = 0;
EXPECT_TRUE(CipherUtils::GetValue<int>(config, "key_int", &out_int));
EXPECT_EQ(out_int, 1);

- bool out_bool;
+ bool out_bool = false;
EXPECT_TRUE(CipherUtils::GetValue<bool>(config, "key_bool", &out_bool));
EXPECT_EQ(out_bool, true);

- bool out_bool1;
+ bool out_bool1 = false;
EXPECT_TRUE(CipherUtils::GetValue<bool>(config, "key_bool1", &out_bool1));
EXPECT_EQ(out_bool1, false);

- bool out_bool2;
+ bool out_bool2 = false;
EXPECT_TRUE(CipherUtils::GetValue<bool>(config, "key_bool2", &out_bool2));
EXPECT_EQ(out_bool2, false);
}
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -371,8 +371,8 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
bool mkldnn_with_bias = is_mkldnn && has_bias;

// Create eltwise_y (conv bias) variable
- phi::DenseTensor* eltwise_y_in_tensor;
- Node* eltwise_y_in_node;
+ phi::DenseTensor* eltwise_y_in_tensor = nullptr;
+ Node* eltwise_y_in_node = nullptr;
if (!mkldnn_with_bias) {
VarDesc eltwise_y_in_desc(
patterns::PDNodeName("fuse_conv_bn", conv_type() + "_eltwise_y_in"));
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc
@@ -59,7 +59,7 @@ void TestMain(const std::string& conv_type) {
auto* in = layers.data("in", {1, 3, 20, 20});
auto* filters = layers.data("filters", {3, 3, 2, 2}, true);
auto* bias_0 = layers.data("bias_0", {3}, true);
- VarDesc* conv_out;
+ VarDesc* conv_out = nullptr;
if (conv_type == "conv_transpose") {
conv_out = layers.conv2d_transpose(in, filters, bias_0);
} else {
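
conv_out above is assigned in the if branch and presumably in the truncated else branch as well; the nullptr initializer is there because the check flags the declaration itself rather than proving that every path assigns before use. A minimal sketch with hypothetical helpers in place of layers.conv2d_transpose() and layers.conv2d():

#include <string>

struct VarDesc {};

// Hypothetical stand-ins for the test helpers that build conv ops.
VarDesc* MakeConvTransposeOut() { static VarDesc v; return &v; }
VarDesc* MakeConvOut() { static VarDesc v; return &v; }

VarDesc* PickConvOut(const std::string& conv_type) {
  VarDesc* conv_out = nullptr;  // assigned below, but initialized up front anyway
  if (conv_type == "conv_transpose") {
    conv_out = MakeConvTransposeOut();
  } else {
    conv_out = MakeConvOut();
  }
  return conv_out;
}
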
@@ -122,7 +122,7 @@ void DeleteQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
platform::errors::InvalidArgument(
"Input scale tensor's place should be CPU."));

- float input_scale;
+ float input_scale = NAN;
if (input_scale_tensor.dtype() == phi::DataType::FLOAT32) {
const float* input_scale_data = input_scale_tensor.data<float>();
input_scale = input_scale_data[0];
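
input_scale above is initialized to NAN rather than 0, presumably because 0 is a plausible but wrong scale, so a missed assignment stays detectable. A minimal sketch (not Paddle code) of that sentinel pattern, assuming a dtype dispatch in which some branch might not assign:

#include <cmath>
#include <cstdio>

float ReadScale(int dtype, const float* fp32_data, const double* fp64_data) {
  float scale = NAN;  // sentinel: stays NaN if no branch below matches
  if (dtype == 0) {
    scale = fp32_data[0];
  } else if (dtype == 1) {
    scale = static_cast<float>(fp64_data[0]);
  }
  if (std::isnan(scale)) {
    std::fprintf(stderr, "unsupported dtype %d: scale was never assigned\n", dtype);
  }
  return scale;
}
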
@@ -45,7 +45,8 @@ void DeleteWeightDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const {
if (n->IsOp()) {
auto* op = n->Op();
if (op->Type() == "dequantize_linear") {
- Node *weight_var_node, *calcu_op_node, *while_op_node;
+ Node *weight_var_node = nullptr, *calcu_op_node = nullptr,
+ *while_op_node = nullptr;
Node *dequantized_weight_var_node = nullptr, *scale_var_node = nullptr;
// 1. Judge whether for dequant weight and find
// weight_var_node/scale_var_node
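
The declaration above also shows that an initializer belongs to a single declarator, so every pointer in a multi-variable declaration needs its own "= nullptr" (which is why the fixed line now wraps). A minimal sketch with a simplified Node type:

struct Node {};

void Example() {
  // Before: Node *weight_var_node, *calcu_op_node, *while_op_node;
  Node *weight_var_node = nullptr, *calcu_op_node = nullptr,
       *while_op_node = nullptr;
  (void)weight_var_node;  // silence unused-variable warnings in this sketch
  (void)calcu_op_node;
  (void)while_op_node;
}
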
8 changes: 4 additions & 4 deletions paddle/fluid/framework/ir/fuse_gemm_epilogue_pass.cc
@@ -83,7 +83,7 @@ ir::Graph *FuseGemmEpiloguePass::FuseLinearFwd(ir::Graph *graph,
auto matmul_op_desc = matmul_op->Op();
if (!IsGemmFromLinear_(matmul_x_shape, matmul_w_shape)) return;

- bool trans_x, trans_y;
+ bool trans_x = false, trans_y = false;
GetTransposeAttrsFromOp(*matmul_op_desc, &trans_x, &trans_y);

OpDesc fused_gemm_epilogue_op_desc(matmul_op->Op()->Block());
@@ -168,7 +168,7 @@ ir::Graph *FuseGemmEpiloguePass::FuseLinearActFwd(

auto activation = act_op->Op()->Type();

- bool trans_x, trans_y;
+ bool trans_x = false, trans_y = false;
GetTransposeAttrsFromOp(*matmul_op_desc, &trans_x, &trans_y);

OpDesc fused_gemm_epilogue_op_desc(matmul_op->Op()->Block());
@@ -291,7 +291,7 @@ ir::Graph *FuseGemmEpiloguePass::FuseLinearBwd(ir::Graph *graph,
auto matmul_grad_op_desc = matmul_grad_op->Op();
if (!IsGemmFromLinear_(matmul_grad_x_shape, matmul_grad_w_shape)) return;

- bool trans_x, trans_y;
+ bool trans_x = false, trans_y = false;
GetTransposeAttrsFromOp(*matmul_grad_op_desc, &trans_x, &trans_y);

OpDesc fused_gemm_epilogue_grad_op_desc(ele_add_grad_op->Op()->Block());
@@ -430,7 +430,7 @@ ir::Graph *FuseGemmEpiloguePass::FuseLinearActBwd(

auto activation_grad = act_grad_op->Op()->Type();

- bool trans_x, trans_y;
+ bool trans_x = false, trans_y = false;
GetTransposeAttrsFromOp(*matmul_grad_op_desc, &trans_x, &trans_y);
OpDesc fused_gemm_epilogue_grad_op_desc(ele_add_grad_op->Op()->Block());
fused_gemm_epilogue_grad_op_desc.SetType("fused_gemm_epilogue_grad");
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/graph_viz_pass.cc
@@ -149,7 +149,7 @@ void GraphVizPass::ApplyImpl(ir::Graph* graph) const {
}
}
}
- decltype(op_attrs)* attr;
+ decltype(op_attrs)* attr = nullptr;
if (marked_nodes.count(n)) {
attr = &marked_var_attrs;
} else if (const_cast<Node*>(n)->Var() &&
36 changes: 18 additions & 18 deletions paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc
@@ -273,9 +273,9 @@ PDNode* MultiHeadMatmulPattern::operator()() {
auto* mul0_out_var =
pattern->NewNode(mul0_out_repr())->assert_is_ops_output(mul_ops);

- decltype(mul0) eltadd0;
- decltype(mul0) eltadd0_b_var;
- decltype(mul0) eltadd0_out_var;
+ decltype(mul0) eltadd0 = nullptr;
+ decltype(mul0) eltadd0_b_var = nullptr;
+ decltype(mul0) eltadd0_out_var = nullptr;

mul0_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");

@@ -353,9 +353,9 @@ PDNode* MultiHeadMatmulPattern::operator()() {
auto* mul1_out_var =
pattern->NewNode(mul1_out_repr())->assert_is_ops_output(mul_ops);

- decltype(mul1) eltadd1;
- decltype(mul1) eltadd1_b_var;
- decltype(mul1) eltadd1_out_var;
+ decltype(mul1) eltadd1 = nullptr;
+ decltype(mul1) eltadd1_b_var = nullptr;
+ decltype(mul1) eltadd1_out_var = nullptr;

mul1_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");
eltadd1 = pattern->NewNode(eltadd1_repr())->assert_is_op("elementwise_add");
@@ -389,9 +389,9 @@ PDNode* MultiHeadMatmulPattern::operator()() {
auto* mul2_out_var =
pattern->NewNode(mul2_out_repr())->assert_is_ops_output(mul_ops);

- decltype(mul2) eltadd2;
- decltype(mul2) eltadd2_b_var;
- decltype(mul2) eltadd2_out_var;
+ decltype(mul2) eltadd2 = nullptr;
+ decltype(mul2) eltadd2_b_var = nullptr;
+ decltype(mul2) eltadd2_out_var = nullptr;

mul2_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");
eltadd2 = pattern->NewNode(eltadd2_repr())->assert_is_op("elementwise_add");
@@ -465,9 +465,9 @@ PDNode* MultiHeadMatmulV3Pattern::operator()() {
auto* mul0_out_var =
pattern->NewNode(mul0_out_repr())->assert_is_ops_output(matmul_ops);

- decltype(mul0) eltadd0;
- decltype(mul0) eltadd0_b_var;
- decltype(mul0) eltadd0_out_var;
+ decltype(mul0) eltadd0 = nullptr;
+ decltype(mul0) eltadd0_b_var = nullptr;
+ decltype(mul0) eltadd0_out_var = nullptr;

mul0_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");

@@ -539,9 +539,9 @@ PDNode* MultiHeadMatmulV3Pattern::operator()() {
auto* mul1_out_var =
pattern->NewNode(mul1_out_repr())->assert_is_ops_output(matmul_ops);

- decltype(mul1) eltadd1;
- decltype(mul1) eltadd1_b_var;
- decltype(mul1) eltadd1_out_var;
+ decltype(mul1) eltadd1 = nullptr;
+ decltype(mul1) eltadd1_b_var = nullptr;
+ decltype(mul1) eltadd1_out_var = nullptr;

mul1_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");
eltadd1 = pattern->NewNode(eltadd1_repr())->assert_is_op("elementwise_add");
@@ -575,9 +575,9 @@ PDNode* MultiHeadMatmulV3Pattern::operator()() {
auto* mul2_out_var =
pattern->NewNode(mul2_out_repr())->assert_is_ops_output(matmul_ops);

- decltype(mul2) eltadd2;
- decltype(mul2) eltadd2_b_var;
- decltype(mul2) eltadd2_out_var;
+ decltype(mul2) eltadd2 = nullptr;
+ decltype(mul2) eltadd2_b_var = nullptr;
+ decltype(mul2) eltadd2_out_var = nullptr;

mul2_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");
eltadd2 = pattern->NewNode(eltadd2_repr())->assert_is_op("elementwise_add");
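
The eltadd* nodes above are declared with decltype(mul2), i.e. the same pointer type as mul2, and each now gets an explicit nullptr initializer. A minimal sketch of that pattern with a simplified PDNode type:

struct PDNode {};

void BuildPattern(PDNode* mul0) {
  // decltype(mul0) is PDNode*, so these are pointers initialized to null and
  // assigned by the pattern-construction code before use.
  decltype(mul0) eltadd0 = nullptr;
  decltype(mul0) eltadd0_b_var = nullptr;
  decltype(mul0) eltadd0_out_var = nullptr;
  (void)eltadd0;  // silence unused-variable warnings in this sketch
  (void)eltadd0_b_var;
  (void)eltadd0_out_var;
}
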
18 changes: 9 additions & 9 deletions paddle/fluid/framework/ir/multihead_matmul_roformer_fuse_pass.cc
@@ -53,9 +53,9 @@ PDNode* MultiHeadMatmulRoformerPattern::operator()() {
auto* mul0_out_var =
pattern->NewNode(mul0_out_repr())->assert_is_ops_output(matmul_ops);

- decltype(mul0) eltadd0;
- decltype(mul0) eltadd0_b_var;
- decltype(mul0) eltadd0_out_var;
+ decltype(mul0) eltadd0 = nullptr;
+ decltype(mul0) eltadd0_b_var = nullptr;
+ decltype(mul0) eltadd0_out_var = nullptr;

mul0_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");

@@ -165,9 +165,9 @@ PDNode* MultiHeadMatmulRoformerPattern::operator()() {
auto* mul1_out_var =
pattern->NewNode(mul1_out_repr())->assert_is_ops_output(matmul_ops);

- decltype(mul1) eltadd1;
- decltype(mul1) eltadd1_b_var;
- decltype(mul1) eltadd1_out_var;
+ decltype(mul1) eltadd1 = nullptr;
+ decltype(mul1) eltadd1_b_var = nullptr;
+ decltype(mul1) eltadd1_out_var = nullptr;

mul1_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");
eltadd1 = pattern->NewNode(eltadd1_repr())->assert_is_op("elementwise_add");
@@ -232,9 +232,9 @@ PDNode* MultiHeadMatmulRoformerPattern::operator()() {
auto* mul2_out_var =
pattern->NewNode(mul2_out_repr())->assert_is_ops_output(matmul_ops);

- decltype(mul2) eltadd2;
- decltype(mul2) eltadd2_b_var;
- decltype(mul2) eltadd2_out_var;
+ decltype(mul2) eltadd2 = nullptr;
+ decltype(mul2) eltadd2_b_var = nullptr;
+ decltype(mul2) eltadd2_out_var = nullptr;

mul2_out_var->AsIntermediate()->assert_is_op_input("elementwise_add");
eltadd2 = pattern->NewNode(eltadd2_repr())->assert_is_op("elementwise_add");