From 2706d8e0bbd59937ef4aef9c9473b2b4c3d8224d Mon Sep 17 00:00:00 2001
From: Guanhuachen2003 <166631022+Guanhuachen2003@users.noreply.github.com>
Date: Tue, 30 Jul 2024 17:19:56 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=90Error=20Message=20No.=2019=E3=80=8122?=
 =?UTF-8?q?=20BUAA=E3=80=91modify=20error=20messages=20(#66698)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Update fleet_wrapper.cc

* Update data_set.cc

* Update fleet_wrapper.cc

* Update fleet_wrapper.cc

* Update fleet_wrapper.cc
---
 paddle/fluid/framework/data_set.cc            | 175 ++++++++++--
 paddle/fluid/framework/fleet/fleet_wrapper.cc | 263 +++++++++++++++---
 2 files changed, 379 insertions(+), 59 deletions(-)

diff --git a/paddle/fluid/framework/data_set.cc b/paddle/fluid/framework/data_set.cc
index 12214e032a140..ea535e4dfd211 100644
--- a/paddle/fluid/framework/data_set.cc
+++ b/paddle/fluid/framework/data_set.cc
@@ -603,7 +603,13 @@ template <typename T>
 void DatasetImpl<T>::PreLoadIntoMemory() {
   VLOG(3) << "DatasetImpl<T>::PreLoadIntoMemory() begin";
   if (preload_thread_num_ != 0) {
-    CHECK(static_cast<size_t>(preload_thread_num_) == preload_readers_.size());
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(preload_thread_num_),
+        preload_readers_.size(),
+        phi::errors::InvalidArgument("Preload thread number (%d) does not "
+                                     "match the size of preload readers (%d).",
+                                     preload_thread_num_,
+                                     preload_readers_.size()));
     preload_threads_.clear();
     for (int64_t i = 0; i < preload_thread_num_; ++i) {
       preload_threads_.emplace_back(
@@ -611,7 +617,13 @@ void DatasetImpl<T>::PreLoadIntoMemory() {
           preload_readers_[i].get());
     }
   } else {
-    CHECK(static_cast<size_t>(thread_num_) == readers_.size());
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(thread_num_),
+        readers_.size(),
+        phi::errors::InvalidArgument(
+            "Thread number (%d) does not match the size of readers (%d).",
+            thread_num_,
+            readers_.size()));
     preload_threads_.clear();
     for (int64_t i = 0; i < thread_num_; ++i) {
       preload_threads_.emplace_back(
@@ -975,18 +987,39 @@ void DatasetImpl<T>::DynamicAdjustChannelNum(int channel_num,
   int cur_channel = 0;
   uint64_t output_channels_data_size = 0;
   uint64_t consume_channels_data_size = 0;
-  CHECK(multi_output_channel_.size() == multi_consume_channel_.size());
+  PADDLE_ENFORCE_EQ(multi_output_channel_.size(),
+                    multi_consume_channel_.size(),
+                    phi::errors::InvalidArgument(
+                        "The size of multi_output_channel (%d) does not match "
+                        "the size of multi_consume_channel (%d).",
+                        multi_output_channel_.size(),
+                        multi_consume_channel_.size()));
+
   for (size_t i = 0; i < multi_output_channel_.size(); ++i) {
     output_channels_data_size += multi_output_channel_[i]->Size();
     consume_channels_data_size += multi_consume_channel_[i]->Size();
   }
+
   if (output_channels_data_size != 0) {
-    CHECK(consume_channels_data_size == 0);  // NOLINT
+    PADDLE_ENFORCE_EQ(consume_channels_data_size,
+                      0,
+                      phi::errors::InvalidArgument(
+                          "When output_channels_data_size (%d) is not zero, "
+                          "consume_channels_data_size (%d) should be zero.",
+                          output_channels_data_size,
+                          consume_channels_data_size));
    cur_channel = 0;
  } else {
-    CHECK(output_channels_data_size == 0);  // NOLINT
+    PADDLE_ENFORCE_EQ(
+        output_channels_data_size,
+        0,
+        phi::errors::InvalidArgument(
+            "output_channels_data_size should be zero when the consume "
" + "consume_channels_data_size: %d", + consume_channels_data_size)); cur_channel = 1; } + if (cur_channel == 0) { // NOLINT origin_channels = &multi_output_channel_; other_channels = &multi_consume_channel_; @@ -998,10 +1031,22 @@ void DatasetImpl::DynamicAdjustChannelNum(int channel_num, origin_pv_channels = &multi_pv_consume_; other_pv_channels = &multi_pv_output_; } - CHECK(origin_channels != nullptr); // NOLINT - CHECK(other_channels != nullptr); // NOLINT - CHECK(origin_pv_channels != nullptr); // NOLINT - CHECK(other_pv_channels != nullptr); // NOLINT + PADDLE_ENFORCE_NOT_NULL(origin_channels, + phi::errors::InvalidArgument( + "origin_channels should not be nullptr, please " + "check if it is properly initialized.")); + PADDLE_ENFORCE_NOT_NULL( + other_channels, + phi::errors::InvalidArgument("other_channels should not be nullptr, " + "ensure it is correctly set before usage.")); + PADDLE_ENFORCE_NOT_NULL( + origin_pv_channels, + phi::errors::InvalidArgument("origin_pv_channels must not be nullptr, " + "verify its initialization.")); + PADDLE_ENFORCE_NOT_NULL( + other_pv_channels, + phi::errors::InvalidArgument( + "other_pv_channels must not be nullptr, confirm its setup.")); paddle::framework::Channel total_data_channel = paddle::framework::MakeChannel(); @@ -1096,9 +1141,27 @@ void DatasetImpl::CreateReaders() { VLOG(3) << "thread num in Dataset: " << thread_num_; VLOG(3) << "Filelist size in Dataset: " << filelist_.size(); VLOG(3) << "channel num in Dataset: " << channel_num_; - CHECK(thread_num_ > 0) << "thread num should > 0"; - CHECK(channel_num_ > 0) << "channel num should > 0"; - CHECK(channel_num_ <= thread_num_) << "channel num should <= thread num"; + PADDLE_ENFORCE_GT( + thread_num_, + 0, + phi::errors::InvalidArgument("The number of threads (thread_num) should " + "be greater than 0. Received: %d", + thread_num_)); + PADDLE_ENFORCE_GT( + channel_num_, + 0, + phi::errors::InvalidArgument("The number of channels (channel_num) " + "should be greater than 0. Received: %d", + channel_num_)); + PADDLE_ENFORCE_LE(channel_num_, + thread_num_, + phi::errors::InvalidArgument( + "The number of channels (channel_num) should be less " + "than or equal to the number of threads (thread_num). " + "Received channel_num: %d, thread_num: %d", + channel_num_, + thread_num_)); + VLOG(3) << "readers size: " << readers_.size(); if (!readers_.empty()) { VLOG(3) << "readers_.size() = " << readers_.size() @@ -1174,8 +1237,17 @@ void DatasetImpl::CreatePreLoadReaders() { if (preload_thread_num_ == 0) { preload_thread_num_ = thread_num_; } - CHECK(preload_thread_num_ > 0) << "thread num should > 0"; - CHECK(input_channel_ != nullptr); + PADDLE_ENFORCE_GT(preload_thread_num_, + 0, + phi::errors::InvalidArgument( + "The number of preload threads (preload_thread_num) " + "should be greater than 0. Received: %d", + preload_thread_num_)); + PADDLE_ENFORCE_NOT_NULL(input_channel_, + phi::errors::InvalidArgument( + "The input_channel should not be nullptr. Please " + "ensure it is properly initialized.")); + preload_readers_.clear(); for (int i = 0; i < preload_thread_num_; ++i) { preload_readers_.push_back( @@ -1296,7 +1368,14 @@ int MultiSlotDataset::ReceiveFromClient(int msg_type, while (ar.Cursor() < ar.Finish()) { data.push_back(ar.Get()); } - CHECK(ar.Cursor() == ar.Finish()); + PADDLE_ENFORCE_EQ(ar.Cursor(), + ar.Finish(), + phi::errors::InvalidArgument( + "Cursor position does not match finish position. The " + "cursor should be at the finish position. 
Received " + "cursor position: %d, expected finish position: %d.", + ar.Cursor(), + ar.Finish())); auto fleet_ptr = framework::FleetWrapper::GetInstance(); // not use random because it doesn't perform well here. @@ -1437,7 +1516,13 @@ void MultiSlotDataset::GenerateLocalTablesUnlock(int table_id, return; } - CHECK(multi_output_channel_.size() != 0); // NOLINT + PADDLE_ENFORCE_NE( + multi_output_channel_.size(), + 0, + phi::errors::InvalidArgument("The size of multi_output_channel should " + "not be zero. Received size: %zu.", + multi_output_channel_.size())); + // NOLINT auto fleet_ptr_ = framework::FleetWrapper::GetInstance(); std::vector>>& local_map_tables = fleet_ptr_->GetLocalTable(); @@ -1524,7 +1609,13 @@ void MultiSlotDataset::MergeByInsId() { use_slots_is_dense.push_back(slot.is_dense()); } } - CHECK(multi_output_channel_.size() != 0); // NOLINT + PADDLE_ENFORCE_NE( + multi_output_channel_.size(), + 0, + phi::errors::InvalidArgument("The size of multi_output_channel should " + "not be zero. Received size: %zu.", + multi_output_channel_.size())); + // NOLINT auto channel_data = paddle::framework::MakeChannel(); VLOG(3) << "multi_output_channel_.size() " << multi_output_channel_.size(); for (auto& item : multi_output_channel_) { @@ -1699,7 +1790,13 @@ void MultiSlotDataset::MergeByInsId() { vec_data.clear(); vec_data.shrink_to_fit(); } - CHECK(channel_data->Size() == 0); // NOLINT + PADDLE_ENFORCE_EQ( + channel_data->Size(), + 0, + phi::errors::InvalidArgument( + "The size of channel_data should be zero. Received size: %zu.", + channel_data->Size())); + // NOLINT channel_data->Clear(); VLOG(3) << "MultiSlotDataset::MergeByInsId end"; } @@ -1776,8 +1873,14 @@ void MultiSlotDataset::PreprocessChannel( input_channel_->Close(); input_channel_->ReadAll(slots_shuffle_original_data_); } else { - CHECK(out_channel_size > 0); // NOLINT - if (cur_channel_ == 0) { // NOLINT + PADDLE_ENFORCE_GT( + out_channel_size, + 0, + phi::errors::InvalidArgument("The out_channel_size should be greater " + "than 0. Received size: %d.", + out_channel_size)); + // NOLINT + if (cur_channel_ == 0) { // NOLINT for (auto& item : multi_output_channel_) { std::vector vec_data; item->Close(); @@ -1844,8 +1947,12 @@ void MultiSlotDataset::PreprocessChannel( // end_size += static_cast(item->Size()); // } // } - CHECK(input_channel_->Size() == 0) - << "input channel should be empty before slots shuffle"; + PADDLE_ENFORCE_EQ( + input_channel_->Size(), + 0, + phi::errors::InvalidArgument("The input channel should be empty before " + "slots shuffle. Received size: %zu.", + input_channel_->Size())); } // slots shuffle to input_channel_ with needed-shuffle slots @@ -1888,9 +1995,27 @@ void SlotRecordDataset::CreateReaders() { VLOG(3) << "thread num in Dataset: " << thread_num_; VLOG(3) << "Filelist size in Dataset: " << filelist_.size(); VLOG(3) << "channel num in Dataset: " << channel_num_; - CHECK(thread_num_ > 0) << "thread num should > 0"; - CHECK(channel_num_ > 0) << "channel num should > 0"; - CHECK(channel_num_ <= thread_num_) << "channel num should <= thread num"; + PADDLE_ENFORCE_GT( + thread_num_, + 0, + phi::errors::InvalidArgument( + "The thread number should be greater than 0. Received: %d.", + thread_num_)); + PADDLE_ENFORCE_GT( + channel_num_, + 0, + phi::errors::InvalidArgument( + "The channel number should be greater than 0. 
Received: %d.", + channel_num_)); + PADDLE_ENFORCE_LE( + channel_num_, + thread_num_, + phi::errors::InvalidArgument( + "The channel number should be less than or equal to the thread " + "number. Received channel number: %d, thread number: %d.", + channel_num_, + thread_num_)); + VLOG(3) << "readers size: " << readers_.size(); if (!readers_.empty()) { VLOG(3) << "readers_.size() = " << readers_.size() diff --git a/paddle/fluid/framework/fleet/fleet_wrapper.cc b/paddle/fluid/framework/fleet/fleet_wrapper.cc index dd3935b598b93..d7aa221ca4537 100644 --- a/paddle/fluid/framework/fleet/fleet_wrapper.cc +++ b/paddle/fluid/framework/fleet/fleet_wrapper.cc @@ -174,7 +174,13 @@ void FleetWrapper::HeterPullSparseVars( continue; } phi::DenseTensor* tensor = var->GetMutable(); - CHECK(tensor != nullptr) << "tensor of var " << name << " is null"; + PADDLE_ENFORCE_NOT_NULL( + tensor, + phi::errors::InvalidArgument( + "The tensor for variable '%s' is null. " + "Ensure that the tensor is properly initialized before use.", + name.c_str())); + int64_t* ids = tensor->data(); size_t len = tensor->numel(); @@ -254,7 +260,12 @@ void FleetWrapper::HeterPushSparseVars( show_index = 1; click_index = 2; } - CHECK_GE(grad_dim, 0); + PADDLE_ENFORCE_GE(grad_dim, + 0, + phi::errors::InvalidArgument( + "The gradient dimension (grad_dim) must be greater " + "than or equal to 0, but got %d.", + grad_dim)); sparse_push_keys.clear(); sparse_push_keys.reserve(fea_keys.size() + 1); @@ -305,14 +316,29 @@ void FleetWrapper::HeterPushSparseVars( continue; } sparse_push_keys.push_back(ids[id_idx]); - CHECK(fea_idx < push_values.size()); + PADDLE_ENFORCE_LT(fea_idx, + push_values.size(), + phi::errors::InvalidArgument( + "The feature index (fea_idx) must be less than the " + "size of push_values. " + "Received fea_idx: %d, push_values size: %zu.", + fea_idx, + push_values.size())); if (use_cvm || no_cvm) { memcpy(push_values[fea_idx].data() + offset + slot_offset, g, sizeof(float) * emb_dim); } else { - CHECK(fea_idx < fea_labels.size()); + PADDLE_ENFORCE_LT(fea_idx, + fea_labels.size(), + phi::errors::InvalidArgument( + "The feature index (fea_idx) must be less than " + "the size of fea_labels. " + "Received fea_idx: %d, fea_labels size: %zu.", + fea_idx, + fea_labels.size())); + memcpy(push_values[fea_idx].data() + offset + slot_offset, g, sizeof(float) * emb_dim); @@ -349,10 +375,26 @@ void FleetWrapper::HeterPushSparseVars( ++no_grad_fea_num; } } - CHECK(fea_idx + no_grad_fea_num == fea_keys.size()) - << "fea_idx: " << fea_idx << " no_grad_fea_num: " << no_grad_fea_num - << " features size: " << fea_keys.size(); - CHECK(fea_idx == sparse_push_keys.size()); + PADDLE_ENFORCE_EQ( + fea_idx + no_grad_fea_num, + fea_keys.size(), + phi::errors::InvalidArgument( + "The sum of fea_idx and no_grad_fea_num must be equal to the size of " + "fea_keys. " + "Received fea_idx: %d, no_grad_fea_num: %d, fea_keys size: %zu.", + fea_idx, + no_grad_fea_num, + fea_keys.size())); + + PADDLE_ENFORCE_EQ( + fea_idx, + sparse_push_keys.size(), + phi::errors::InvalidArgument( + "The fea_idx must be equal to the size of sparse_push_keys. 
" + "Received fea_idx: %d, sparse_push_keys size: %zu.", + fea_idx, + sparse_push_keys.size())); + if (fea_idx == 0) { return; } @@ -446,7 +488,13 @@ void FleetWrapper::PullSparseVarsFromLocal( continue; } phi::DenseTensor* tensor = var->GetMutable(); - CHECK(tensor != nullptr) << "tensor of var " << name << " is null"; + PADDLE_ENFORCE_NOT_NULL( + tensor, + phi::errors::InvalidArgument( + "The tensor for variable '%s' is null. Please ensure the tensor is " + "properly initialized before use.", + name.c_str())); + int64_t* ids = tensor->data(); size_t len = tensor->numel(); for (auto i = 0u; i < len; ++i) { @@ -510,7 +558,13 @@ std::future FleetWrapper::PullSparseVarsAsync( continue; } phi::DenseTensor* tensor = var->GetMutable(); - CHECK(tensor != nullptr) << "tensor of var " << name << " is null"; + PADDLE_ENFORCE_NOT_NULL( + tensor, + phi::errors::InvalidArgument( + "The tensor for variable '%s' is null. Please ensure the tensor is " + "properly initialized before use.", + name.c_str())); + int64_t* ids = tensor->data(); size_t len = tensor->numel(); for (auto i = 0u; i < len; ++i) { @@ -555,7 +609,13 @@ void FleetWrapper::PullSparseVarsSync( continue; } phi::DenseTensor* tensor = var->GetMutable(); - CHECK(tensor != nullptr) << "tensor of var " << name << " is null"; + PADDLE_ENFORCE_NOT_NULL( + tensor, + phi::errors::InvalidArgument( + "The tensor for variable '%s' is null. Please ensure the tensor is " + "properly initialized before use.", + name.c_str())); + int64_t* ids = tensor->data(); size_t len = tensor->numel(); @@ -640,13 +700,31 @@ void FleetWrapper::PullSparseToTensorSync( for (size_t i = 0; i < len; ++i, output_len += fea_dim) { if (!output || output_len == size_t(output->numel())) { ++output_index; - CHECK(output_index < outputs->size()); // NOLINT + PADDLE_ENFORCE_LT( + output_index, + outputs->size(), + phi::errors::InvalidArgument( + "The output index must be less than the size of outputs. " + "Received output_index: %d, outputs size: %zu.", + output_index, + outputs->size())); // NOLINT output = outputs->at(output_index); output_data = output->mutable_data(place); output_len = 0; - CHECK(output->numel() % fea_dim == 0); // NOLINT - CHECK(output_data != nullptr); // NOLINT + PADDLE_ENFORCE_EQ(output->numel() % fea_dim, + 0, + phi::errors::InvalidArgument( + "The number of elements in the output must be " + "divisible by fea_dim. " + "Received output numel: %d, fea_dim: %d.", + output->numel(), + fea_dim)); // NOLINT + PADDLE_ENFORCE_NOT_NULL( + output_data, + phi::errors::InvalidArgument( + "The output data pointer is null.")); // NOLINT } + uint64_t real_id = static_cast(ids[i]); if (real_id == padding_id) { memcpy(output_data + output_len, @@ -765,7 +843,13 @@ void FleetWrapper::PushDenseParamSync( std::vector regions; for (auto& t : var_names) { Variable* var = scope.FindVar(t); - CHECK(var != nullptr) << "var[" << t << "] not found"; + PADDLE_ENFORCE_NOT_NULL( + var, + phi::errors::NotFound( + "Variable 'var' with identifier [%s] not found. 
Please ensure the " + "variable is correctly initialized.", + t.c_str())); + phi::DenseTensor* tensor = var->GetMutable(); float* g = tensor->mutable_data(place); paddle::ps::Region reg(g, tensor->numel()); @@ -775,7 +859,14 @@ void FleetWrapper::PushDenseParamSync( regions.data(), regions.size(), table_id); push_status.wait(); auto status = push_status.get(); - CHECK(status == 0) << "push dense param failed, status[" << status << "]"; + PADDLE_ENFORCE_EQ( + status, + 0, + phi::errors::InvalidArgument( + "Pushing dense parameter failed. Received status: %d. Please check " + "the status and ensure the operation is successful.", + status)); + #endif } @@ -973,7 +1064,14 @@ void FleetWrapper::PushSparseVarsWithLabelAsync( show_index = 1; click_index = 2; } - CHECK_GE(grad_dim, 0); + PADDLE_ENFORCE_GE( + grad_dim, + 0, + phi::errors::InvalidArgument("The gradient dimension (grad_dim) must be " + "greater than or equal to 0. " + "Received grad_dim: %d. Please ensure that " + "the gradient dimension is valid.", + grad_dim)); sparse_push_keys->clear(); sparse_push_keys->reserve(fea_keys.size() + 1); @@ -1036,14 +1134,33 @@ void FleetWrapper::PushSparseVarsWithLabelAsync( continue; } sparse_push_keys->push_back(ids[id_idx]); - CHECK(fea_idx < (*push_values).size()); + PADDLE_ENFORCE_LT( + fea_idx, + (*push_values).size(), + phi::errors::InvalidArgument( + "Feature index (fea_idx) must be less than the size of " + "push_values. " + "Received fea_idx: %d, push_values size: %zu. Please ensure the " + "feature index is within the valid range.", + fea_idx, + (*push_values).size())); if (use_cvm || no_cvm) { memcpy((*push_values)[fea_idx].data() + offset + slot_offset, g, sizeof(float) * emb_dim); } else { - CHECK(fea_idx < fea_labels.size()); + PADDLE_ENFORCE_LT( + fea_idx, + fea_labels.size(), + phi::errors::InvalidArgument( + "Feature index (fea_idx) must be less than the size of " + "fea_labels. " + "Received fea_idx: %d, fea_labels size: %zu. Please ensure the " + "feature index is within the valid range.", + fea_idx, + fea_labels.size())); + memcpy((*push_values)[fea_idx].data() + offset + slot_offset, g, sizeof(float) * emb_dim); @@ -1080,10 +1197,26 @@ void FleetWrapper::PushSparseVarsWithLabelAsync( ++no_grad_fea_num; } } - CHECK(fea_idx + no_grad_fea_num == fea_keys.size()) - << "fea_idx: " << fea_idx << " no_grad_fea_num: " << no_grad_fea_num - << " features size: " << fea_keys.size(); - CHECK(fea_idx == sparse_push_keys->size()); + PADDLE_ENFORCE_EQ( + fea_idx + no_grad_fea_num, + fea_keys.size(), + phi::errors::InvalidArgument( + "The sum of feature index (fea_idx) and no_grad_fea_num must be " + "equal to the size of fea_keys. " + "Received fea_idx: %d, no_grad_fea_num: %d, fea_keys size: %zu.", + fea_idx, + no_grad_fea_num, + fea_keys.size())); + + PADDLE_ENFORCE_EQ(fea_idx, + sparse_push_keys->size(), + phi::errors::InvalidArgument( + "The feature index (fea_idx) must be equal to the size " + "of sparse_push_keys. " + "Received fea_idx: %d, sparse_push_keys size: %zu.", + fea_idx, + sparse_push_keys->size())); + if (fea_idx == 0) { return; } @@ -1136,7 +1269,14 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync( slot_offset = 0; grad_dim = fea_dim; } - CHECK(grad_dim >= 0); // NOLINT + PADDLE_ENFORCE_GE( + grad_dim, + 0, + phi::errors::InvalidArgument("The gradient dimension (grad_dim) must be " + "greater than or equal to 0. " + "Received grad_dim: %d. 
Please ensure that " + "the gradient dimension is valid.", + grad_dim)); int batch_size = -1; for (auto* input : *inputs) { @@ -1145,10 +1285,26 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync( if (batch_size == -1) { batch_size = cur_batch_size; } else { - CHECK(batch_size == cur_batch_size); // NOLINT + PADDLE_ENFORCE_EQ(batch_size, + cur_batch_size, + phi::errors::InvalidArgument( + "The batch size (batch_size) must be equal to the " + "current batch size (cur_batch_size). " + "Received batch_size: %d, cur_batch_size: %d. " + "Please ensure that the batch sizes match.", + batch_size, + cur_batch_size)); + // NOLINT } } - CHECK(batch_size > 0); // NOLINT + PADDLE_ENFORCE_GT(batch_size, + 0, + phi::errors::InvalidArgument( + "The batch size (batch_size) must be greater than 0. " + "Received batch_size: %d. Please ensure that the batch " + "size is valid.", + batch_size)); + // NOLINT std::vector g; for (const phi::DenseTensor* g_tensor : *outputs) { @@ -1170,9 +1326,17 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync( framework::Variable* var = scope.FindVar(click_name); size_t global_idx = 0; if (click_name != "") { - CHECK(var != nullptr); // NOLINT + PADDLE_ENFORCE_NOT_NULL( + var, + phi::errors::InvalidArgument("The variable (var) is null when " + "click_name is not an empty string.")); + phi::DenseTensor* label_tensor = var->GetMutable(); - CHECK(label_tensor != nullptr); // NOLINT + PADDLE_ENFORCE_NOT_NULL( + label_tensor, + phi::errors::InvalidArgument("The label tensor is null when attempting " + "to get mutable DenseTensor from var.")); + int64_t* label_ptr = label_tensor->data(); for (auto* tensor : *inputs) { @@ -1228,9 +1392,25 @@ void FleetWrapper::PushSparseFromTensorWithLabelAsync( } } - CHECK(output_len == g.size()); // NOLINT + PADDLE_ENFORCE_EQ( + output_len, + g.size(), + phi::errors::InvalidArgument( + "The output length (output_len) must be equal to the size of g. " + "Received output_len: %d, g.size(): %d.", + output_len, + g.size())); + if (click_name != "") { - CHECK(input_idx == global_idx); // NOLINT + PADDLE_ENFORCE_EQ( + input_idx, + global_idx, + phi::errors::InvalidArgument( + "The input index (input_idx) must be equal to the global index " + "(global_idx) when click_name is not an empty string. " + "Received input_idx: %d, global_idx: %d.", + input_idx, + global_idx)); } std::vector push_g_vec(input_idx, nullptr); @@ -1307,7 +1487,10 @@ void FleetWrapper::LoadFromPaddleModel(Scope& scope, float* old_data = old_tensor->data(); // new model data, here we assume data type is float Variable* var = scope.FindVar(t); - CHECK(var != nullptr) << "var[" << t << "] not found"; + PADDLE_ENFORCE_NOT_NULL( + var, + phi::errors::NotFound("Variable (var) is null. var[%s] not found.", t)); + phi::DenseTensor* tensor = var->GetMutable(); float* data = tensor->data(); // copy from old data to new data @@ -1621,7 +1804,11 @@ void FleetWrapper::ShrinkDenseTable(int table_id, for (std::string& name : var_list) { if (name.find("batch_sum") != std::string::npos) { Variable* var = scope->FindVar(name); - CHECK(var != nullptr) << "var[" << name << "] not found"; + PADDLE_ENFORCE_NOT_NULL( + var, + phi::errors::NotFound("Variable (var) is null. 
var[%s] not found.", + name)); + VLOG(0) << "prepare shrink dense batch_sum"; phi::DenseTensor* tensor = var->GetMutable(); float* g = tensor->data(); @@ -1631,7 +1818,11 @@ void FleetWrapper::ShrinkDenseTable(int table_id, size_name.replace( size_name.find("batch_sum"), size_name.length(), "batch_size"); Variable* var_size = scope->FindVar(size_name); - CHECK(var_size != nullptr) << "var[" << size_name << "] not found"; + PADDLE_ENFORCE_NOT_NULL( + var_size, + phi::errors::NotFound("Variable size is null. var[%s] not found.", + size_name)); + VLOG(3) << "shrink dense batch_sum: " << name << ", " << size_name; float* g_size = var_size->GetMutable()->data(); @@ -1642,7 +1833,11 @@ void FleetWrapper::ShrinkDenseTable(int table_id, regions.emplace_back(std::move(reg)); } else { Variable* var = scope->FindVar(name); - CHECK(var != nullptr) << "var[" << name << "] not found"; + PADDLE_ENFORCE_NOT_NULL( + var, + phi::errors::NotFound("Variable (var) is null. var[%s] not found.", + name)); + phi::DenseTensor* tensor = var->GetMutable(); float* g = tensor->data(); paddle::ps::Region reg(g, tensor->numel());