【Error Message No. 95,133,220,223,225,240,279,290-291,386,388,398-400,402,411,412】[BUAA] Replace the CHECK macros #67048

Merged: 27 commits, Aug 8, 2024
Changes from 26 commits
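
This PR replaces bare `CHECK`/`PD_CHECK` assertions with `PADDLE_ENFORCE_EQ`/`PADDLE_ENFORCE_NE`, so each failure carries a typed error (`InvalidArgument`, `Fatal`, ...) and a descriptive message. A minimal sketch of the pattern, assuming only `paddle/common/enforce.h`; the `DemoChecks` function and its arguments are illustrative and not part of the diff:

```cpp
#include "paddle/common/enforce.h"

// Illustrative only: shows the before/after shape of the checks in this PR.
void DemoChecks(bool has_value, int r, const void* ptr) {
  // Old style:
  //   CHECK(has_value);
  //   PD_CHECK(r == 0, "xpu api call failed.");
  //   CHECK(ptr != nullptr);
  // New style: actual value, expected value, and a typed error message.
  PADDLE_ENFORCE_EQ(has_value,
                    true,
                    common::errors::InvalidArgument(
                        "Input has no value, please check"));
  PADDLE_ENFORCE_EQ(
      r, 0, common::errors::Fatal("xpu api call failed, `r` is %d", r));
  PADDLE_ENFORCE_NE(
      ptr, nullptr, common::errors::Fatal("Input pointer is nullptr."));
}
```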
12 changes: 9 additions & 3 deletions paddle/cinn/ir/buffer.h
@@ -21,6 +21,7 @@

#include "paddle/cinn/common/common.h"
#include "paddle/cinn/ir/ir.h"
#include "paddle/common/enforce.h"

namespace cinn {
namespace ir {
@@ -108,10 +109,15 @@ class _Buffer_ : public ExprNode<_Buffer_> {

static Buffer Make(const std::string& name,
const std::vector<Expr>& shape = {});

static Buffer Make(const std::string& name, Type type) {
CHECK(!type.is_void());
CHECK(!type.is_unk());
PADDLE_ENFORCE_EQ(!type.is_void(),
true,
::common::errors::InvalidArgument(
"Input argument `type` should not be void"));
PADDLE_ENFORCE_EQ(!type.is_unk(),
                      true,
                      ::common::errors::InvalidArgument(
                          "Input argument `type` should not be of unknown type"));
auto n = make_shared<_Buffer_>();
n->name = name;
n->dtype = type;
@@ -24,14 +24,20 @@ inline ExprVec GetExprVecFromData(const ShapeOrData &shapeordata) {
TensorListExprs list =
shapeordata.dyn_cast<symbol::TensorListShapeOrDataDimExprs>();
for (size_t i = 0; i < list.size(); i++) {
CHECK(list.at(i).data().has_value());
PADDLE_ENFORCE_EQ(list.at(i).data().has_value(),
true,
common::errors::InvalidArgument(
"i-th element of list has no value, please check"));
for (auto expr : list.at(i).data().value()) {
result.emplace_back(expr);
}
}
return result;
} else {
CHECK(shapeordata.data().has_value());
PADDLE_ENFORCE_EQ(shapeordata.data().has_value(),
true,
common::errors::InvalidArgument(
"Input `shapeordata.data` is empty, please check"));
return shapeordata.data().value();
}
}
@@ -1268,7 +1268,11 @@ bool SplitOpInferSymbolicShape(pir::Operation *op,
const auto &x_dims_sym = x_shape_or_data.shape();

// axis
CHECK(op->operand_source(2).defining_op()->isa<paddle::dialect::FullOp>());
PADDLE_ENFORCE_EQ(
op->operand_source(2).defining_op()->isa<paddle::dialect::FullOp>(),
true,
common::errors::InvalidArgument(
          "Invalid input args: axis, please check"));

int64_t axis = op->operand_source(2)
.defining_op<paddle::dialect::FullOp>()
5 changes: 4 additions & 1 deletion paddle/fluid/pir/dialect/operator/ir/op_dialect.cc
@@ -174,7 +174,10 @@ struct SliceOpInferSymbolicShapeInterfaceModel
op->attributes().at("index").dyn_cast<pir::Int32Attribute>().data();
const auto& input_shape =
infer_context->GetShapeOrDataForValue(op->operand_source(0));
CHECK(input_shape.isa<symbol::TensorListShapeOrDataDimExprs>());
PADDLE_ENFORCE_EQ(input_shape.isa<symbol::TensorListShapeOrDataDimExprs>(),
true,
common::errors::InvalidArgument(
                      "Input shape is not a TensorListShapeOrDataDimExprs, please check"));
const symbol::TensorListShapeOrDataDimExprs& data_shape_list =
input_shape.dyn_cast<symbol::TensorListShapeOrDataDimExprs>();
const symbol::TensorShapeOrDataDimExprs& output_shape =
3 changes: 2 additions & 1 deletion paddle/phi/kernels/fusion/xpu/blha_get_max_len_kernel.cc
@@ -37,7 +37,8 @@ void GetMaxLenTensor(const Context& dev_ctx,
max_len_tensor_data,
{bsz},
{0});
PD_CHECK(r == 0, "baidu::xpu::api::reduce_max failed.");
PADDLE_ENFORCE_EQ(
r, 0, common::errors::Fatal("baidu::xpu::api::reduce_max failed."));
MemcpyD2HKernel(dev_ctx, max_len_tensor, 0, out);
}
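
The XPU kernels apply the same recipe to API return codes: the old `PD_CHECK(r == 0, ...)` becomes a `PADDLE_ENFORCE_EQ` against 0, and the later hunks also interpolate the return code into the message. A hedged sketch of that pattern (the `reduce_max` call itself is elided here):

```cpp
// Sketch of the return-code check used around baidu::xpu::api calls in this PR.
// The actual XPU call and its arguments are elided; only the check is shown.
int r = /* baidu::xpu::api::reduce_max(...) */ 0;
PADDLE_ENFORCE_EQ(
    r,
    0,
    common::errors::Fatal(
        "baidu::xpu::api::reduce_max failed, related variable `r` is %d", r));
```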

30 changes: 24 additions & 6 deletions paddle/phi/kernels/fusion/xpu/weight_only_linear_kernel_xpu.cc
@@ -26,8 +26,11 @@ void WeightOnlyLinearKernel(const Context& dev_ctx,
const int32_t arch,
const int32_t group_size,
DenseTensor* out) {
PD_CHECK(weight_dtype == "int8",
"WeightOnlyLinearKernel xpu just support int8 weight only");
PADDLE_ENFORCE_EQ(
weight_dtype,
"int8",
      common::errors::Fatal(
          "WeightOnlyLinearKernel on xpu only supports int8 weight"));
phi::XPUPlace place(phi::backends::xpu::GetXPUCurrentDeviceId());
auto xpu_ctx = static_cast<const phi::XPUContext*>(&dev_ctx);
dev_ctx.template Alloc<T>(out);
@@ -55,14 +58,21 @@ void WeightOnlyLinearKernel(const Context& dev_ctx,
false,
weight_dtype == "int8" ? 127.f : 7.f,
0.f);
PD_CHECK(r == 0, "scale failed");
PADDLE_ENFORCE_EQ(
r,
0,
      common::errors::Fatal(
          "scale failed, related variable `r` is %d", r));
r = baidu::xpu::api::cast_v2<XPUType, float>(
xpu_ctx->x_context(),
reinterpret_cast<const XPUType*>(
max_value_fp16.data<phi::dtype::float16>()),
max_value.data<float>(),
max_value.numel());
PD_CHECK(r == 0, "cast_v2 failed");
PADDLE_ENFORCE_EQ(r,
0,
common::errors::Fatal(
"cast_v2 failed, related variable `r` is %d", r));
} else if (weight_scale.dtype() == phi::DataType::FLOAT32) {
r = baidu::xpu::api::scale(xpu_ctx->x_context(),
weight_scale.data<float>(),
@@ -71,7 +81,10 @@ void WeightOnlyLinearKernel(const Context& dev_ctx,
false,
weight_dtype == "int8" ? 127.f : 7.f,
0.f);
PD_CHECK(r == 0, "scale failed");
PADDLE_ENFORCE_EQ(r,
0,
common::errors::Fatal(
"scale failed, related variable `r` is %d", r));
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Only support that weight scale as type float32 ot float16."));
@@ -115,7 +128,12 @@ void WeightOnlyLinearKernel(const Context& dev_ctx,
: nullptr,
baidu::xpu::api::Activation_t::LINEAR,
max_value.data<float>());
PD_CHECK(r == 0, "baidu::xpu::api::gpt_fc_fusion failed.");
PADDLE_ENFORCE_EQ(r,
0,
common::errors::Fatal(
"baidu::xpu::api::gpt_fc_fusion failed, related "
"variable `r` is %d",
r));
} else if (weight_dtype == "int4") {
PD_THROW("only support int8 weight only now");
}
4 changes: 4 additions & 0 deletions paddle/phi/kernels/gpu/shuffle_batch_utils.h
@@ -41,6 +41,10 @@ struct CacheAllocator {
VLOG(2) << "deallocate ";
allocation_map_type::iterator iter = busy_allocation_.find(ptr);
CHECK(iter != busy_allocation_.end());
PADDLE_ENFORCE_NE(iter,
busy_allocation_.end(),
                      common::errors::InvalidArgument(
                          "Deallocate failed, cannot find the allocation to erase"));
busy_allocation_.erase(iter);
}

@@ -288,7 +288,13 @@ void InferSymExprForOp(Operation* op,
std::vector<symbol::ShapeOrDataDimExprs> cached_result_shape_or_data =
infer_context->GetOpInferSymbolicShapeCache(op_infer_cache_key)
.value();
CHECK(cached_result_shape_or_data.size() == op->num_results());
PADDLE_ENFORCE_EQ(cached_result_shape_or_data.size(),
op->num_results(),
                      common::errors::Fatal(
                          "Cached number of results %u is not equal to the "
                          "number of op results %u",
cached_result_shape_or_data.size(),
op->num_results()));
for (uint32_t i = 0; i < op->num_results(); ++i) {
infer_context->SetShapeOrDataForValue(op->result(i),
cached_result_shape_or_data[i]);
10 changes: 7 additions & 3 deletions test/auto_parallel/custom_op/custom_relu_op.cc
@@ -19,14 +19,18 @@
#include "paddle/phi/api/ext/spmd_infer.h"
#include "paddle/phi/infermeta/spmd_rules/rules.h"

#define CHECK_CPU_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
#define CHECK_CPU_INPUT(x) \
PADDLE_ENFORCE_EQ( \
x.is_cpu(), true, common::errors::Fatal(#x " must be a CPU Tensor."))

template <typename data_t>
void relu_cpu_forward_kernel(const data_t* x_data,
data_t* out_data,
int64_t x_numel) {
PD_CHECK(x_data != nullptr, "x_data is nullptr.");
PD_CHECK(out_data != nullptr, "out_data is nullptr.");
PADDLE_ENFORCE_NE(
x_data, nullptr, common::errors::Fatal("x_data is nullptr."));
PADDLE_ENFORCE_NE(
out_data, nullptr, phi::errors::Fatal("out_data is nullptr."));
for (int64_t i = 0; i < x_numel; ++i) {
out_data[i] = std::max(static_cast<data_t>(0.), x_data[i]);
}
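
The `CHECK_CPU_INPUT` macro keeps its call sites unchanged; only its expansion moves from `PD_CHECK` to `PADDLE_ENFORCE_EQ`. A sketch of how a call site reads after this change; the `relu_cpu_forward` wrapper below is illustrative, only the macro definition comes from the diff:

```cpp
#include "paddle/extension.h"

#define CHECK_CPU_INPUT(x) \
  PADDLE_ENFORCE_EQ(       \
      x.is_cpu(), true, common::errors::Fatal(#x " must be a CPU Tensor."))

std::vector<paddle::Tensor> relu_cpu_forward(const paddle::Tensor& x) {
  // Expands to PADDLE_ENFORCE_EQ(x.is_cpu(), true, ...), so a non-CPU tensor
  // now raises a common::errors::Fatal instead of a bare PD_CHECK failure.
  CHECK_CPU_INPUT(x);
  // The actual relu computation is omitted in this sketch.
  return {x};
}
```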
36 changes: 30 additions & 6 deletions test/cpp/eager/data_structure_tests/grad_node_info_test.cc
@@ -27,7 +27,11 @@ TEST(GradNodeInfo, GradSlotMeta) {
auto grad_slot = egr::GradSlotMeta();
VLOG(6) << "Set SetStopGradient";
grad_slot.SetStopGradient();
CHECK(grad_slot.IsStopGradient() == true);
PADDLE_ENFORCE_EQ(
grad_slot.IsStopGradient(),
true,
common::errors::Fatal("`grad_slot.IsStopGradient()` should be "
"true, please check related function"));
}

void TestGradNodeBase(bool is_remove_gradient_hook) {
@@ -80,8 +84,16 @@ void TestGradNodeBase(bool is_remove_gradient_hook) {
grad_test_node0->InputMeta()[1][0].GetTensorMeta().dtype,
meta.dtype,
phi::errors::InvalidArgument("Dtype of input tensor mismatch."));
CHECK(grad_test_node0->OutputMeta()[0][0].IsStopGradient());
CHECK(grad_test_node0->OutputMeta()[1][0].IsStopGradient());
PADDLE_ENFORCE_EQ(grad_test_node0->OutputMeta()[0][0].IsStopGradient(),
true,
                      common::errors::Fatal(
                          "`grad_test_node0->OutputMeta()[0][0].IsStopGradient()"
                          "` should be true, please check related function"));
PADDLE_ENFORCE_EQ(grad_test_node0->OutputMeta()[1][0].IsStopGradient(),
true,
                      common::errors::Fatal(
                          "`grad_test_node0->OutputMeta()[1][0].IsStopGradient()"
                          "` should be true, please check related function"));
PADDLE_ENFORCE_EQ(
grad_test_node0->OutputMeta()[0][0].GetTensorMeta().dtype,
meta.dtype,
@@ -160,9 +172,17 @@ TEST(GradNodeInfo, Edge) {
auto auto_grad1 = std::make_shared<egr::AutogradMeta>();
VLOG(6) << "Test Construct Edge";
egr::Edge edge0 = egr::Edge();
CHECK(edge0.IsInitialized() == false);
PADDLE_ENFORCE_EQ(
edge0.IsInitialized(),
false,
common::errors::Fatal("`edge0.IsInitialized()` should be "
"false, please check related function"));
egr::Edge edge1 = egr::Edge(grad_test_node0, size_t(0), size_t(0));
CHECK(edge1.IsInitialized() == true);
PADDLE_ENFORCE_EQ(
edge1.IsInitialized(),
true,
common::errors::Fatal("`edge1.IsInitialized()` should be "
"true, please check related function"));
egr::Edge edge2 =
egr::Edge(grad_test_node0, std::make_pair(size_t(1), size_t(0)));
VLOG(6) << "Test Set Edge's Grad Node";
@@ -175,7 +195,11 @@ TEST(GradNodeInfo, Edge) {
2UL,
phi::errors::InvalidArgument("Size of input mismatch. Expected 2."));
std::vector<egr::AutogradMeta*> metas = {auto_grad1.get()};
CHECK(grad_node->InputMeta()[0][0].IsStopGradient() == true);
PADDLE_ENFORCE_EQ(
grad_node->InputMeta()[0][0].IsStopGradient(),
true,
common::errors::Fatal("`grad_node->InputMeta()[0][0].IsStopGradient()` "
"should be true, please check related function"));
VLOG(6) << "Test Get/Set Edge Rank Info";
PADDLE_ENFORCE_EQ(
edge2.GetEdgeRankInfo().first,
6 changes: 5 additions & 1 deletion test/cpp/eager/data_structure_tests/tensor_wrapper_test.cc
@@ -100,5 +100,9 @@ TEST(TensorWrapper, Basic) {
// Test Raw recover
paddle::Tensor et3;
auto tw2 = egr::TensorWrapper(et3);
CHECK(tw2.recover().initialized() == false);
PADDLE_ENFORCE_EQ(
tw2.recover().initialized(),
false,
phi::errors::Fatal(
"Variable `tw2` should not be initialized after recover"));
}
10 changes: 7 additions & 3 deletions test/cpp_extension/mix_relu_and_extension.cc
@@ -18,14 +18,18 @@
#include "custom_power.h" // NOLINT
#include "paddle/extension.h"

#define CHECK_CPU_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
#define CHECK_CPU_INPUT(x) \
PADDLE_ENFORCE_EQ( \
x.is_cpu(), true, common::errors::Fatal(#x " must be a CPU Tensor."))

template <typename data_t>
void relu_cpu_forward_kernel(const data_t* x_data,
data_t* out_data,
int64_t x_numel) {
PD_CHECK(x_data != nullptr, "x_data is nullptr.");
PD_CHECK(out_data != nullptr, "out_data is nullptr.");
PADDLE_ENFORCE_NE(
x_data, nullptr, common::errors::Fatal("x_data is nullptr."));
PADDLE_ENFORCE_NE(
out_data, nullptr, phi::errors::Fatal("out_data is nullptr."));
for (int64_t i = 0; i < x_numel; ++i) {
out_data[i] = std::max(static_cast<data_t>(0.), x_data[i]);
}
5 changes: 4 additions & 1 deletion test/deprecated/cpp/inference/analysis/analyzer_tester.cc
@@ -77,7 +77,10 @@ void TestWord2vecPrediction(const std::string& model_path) {
// For simplicity, we set all the slots with the same data.
std::vector<PaddleTensor> slots(4, tensor);
std::vector<PaddleTensor> outputs;
CHECK(predictor->Run(slots, &outputs));
PADDLE_ENFORCE_EQ(
predictor->Run(slots, &outputs),
true,
      common::errors::Fatal("Paddle predictor failed running, please check"));

PADDLE_ENFORCE_EQ(outputs.size(),
1UL,
11 changes: 9 additions & 2 deletions test/deprecated/cpp/inference/api/analyzer_dam_tester.cc
@@ -70,8 +70,15 @@ struct DataRecord {
response.begin() + batch_end);
data.response_mask.assign(response_mask.begin() + batch_iter,
response_mask.begin() + batch_end);
CHECK(!data.response.empty());
CHECK(!data.response_mask.empty());
PADDLE_ENFORCE_EQ(!data.response.empty(),
true,
                      common::errors::Fatal(
                          "Variable `data.response` is empty, please check"));
PADDLE_ENFORCE_EQ(
!data.response_mask.empty(),
true,
      common::errors::Fatal(
          "Variable `data.response_mask` is empty, please check"));
PADDLE_ENFORCE_EQ(data.response.size(),
data.response_mask.size(),
phi::errors::InvalidArgument(
6 changes: 5 additions & 1 deletion test/deprecated/cpp/inference/api/analyzer_rnn2_tester.cc
@@ -45,7 +45,11 @@ struct DataRecord {
link_step_data_all.begin() + batch_end);
// Prepare LoDs
data.lod.push_back(0);
CHECK(!data.link_step_data_all.empty()) << "empty";
PADDLE_ENFORCE_EQ(
!data.link_step_data_all.empty(),
true,
common::errors::InvalidArgument(
"`data.link_step_data_all` is empty, please check"));
for (size_t j = 0; j < data.link_step_data_all.size(); j++) {
for (const auto &d : data.link_step_data_all[j]) {
data.rnn_link_data.push_back(d);
@@ -39,8 +39,14 @@ struct DataReader {
tensor.lod.front().push_back(data.size());

tensor.data.Resize(data.size() * sizeof(int64_t));
CHECK(tensor.data.data() != nullptr);
CHECK(data.data() != nullptr);
PADDLE_ENFORCE_NE(
tensor.data.data(),
nullptr,
common::errors::Fatal("Variable `tensor.data.data()` is nullptr"));
PADDLE_ENFORCE_NE(
data.data(),
nullptr,
common::errors::Fatal("Variable `data.data()` is nullptr"));
memcpy(tensor.data.data(), data.data(), data.size() * sizeof(int64_t));
tensor.shape.push_back(data.size());
tensor.shape.push_back(1);
6 changes: 4 additions & 2 deletions test/ipu/custom_ops/leaky_relu_cpu.cc
@@ -14,8 +14,10 @@

#include "paddle/extension.h"

#define CHECK_INPUT(x) \
PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
#define CHECK_INPUT(x) \
PADDLE_ENFORCE_EQ(x.place() == paddle::PlaceType::kCPU, \
true, \
common::errors::Fatal(#x " must be a CPU Tensor."))

template <typename data_t>
void leaky_relu_cpu_forward_kernel(const data_t* x_data,
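
The IPU custom-op variant checks the tensor's placement rather than `is_cpu()`, so the whole `place() == paddle::PlaceType::kCPU` comparison is passed as the first argument and compared against `true`, mirroring the original `PD_CHECK` form. A hedged usage sketch; the `leaky_relu_cpu_forward` wrapper is illustrative, only the macro definition comes from the diff:

```cpp
#include "paddle/extension.h"

#define CHECK_INPUT(x)                                      \
  PADDLE_ENFORCE_EQ(x.place() == paddle::PlaceType::kCPU,   \
                    true,                                   \
                    common::errors::Fatal(#x " must be a CPU Tensor."))

std::vector<paddle::Tensor> leaky_relu_cpu_forward(const paddle::Tensor& x,
                                                   float alpha) {
  CHECK_INPUT(x);  // fails with a Fatal error if x is not a CPU Tensor
  (void)alpha;     // used by the real kernel, unused in this sketch
  // The actual leaky-relu computation is omitted in this sketch.
  return {x};
}
```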