【Error Message No.22】 paddle/cinn/hlir/framework/* (#64651)
* fix

* fix

* fix

* fix
enkilee authored May 30, 2024
1 parent f0b0060 commit 10f33a4
Showing 12 changed files with 181 additions and 63 deletions.
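
Every hunk in this commit follows the same pattern: a bare glog-style CHECK_* macro is replaced by the corresponding PADDLE_ENFORCE_* call carrying a phi::errors::InvalidArgument message, with paddle/common/enforce.h included where needed. The snippet below is a minimal, self-contained sketch of that idea; DEMO_ENFORCE_GE, demo::InvalidArgument, and CheckFuncCount are illustrative stand-ins, not Paddle's real implementation (see paddle/common/enforce.h and phi::errors for the real API).

// Minimal sketch of the CHECK_* -> PADDLE_ENFORCE_* migration pattern.
// DEMO_ENFORCE_GE and demo::InvalidArgument are simplified stand-ins,
// NOT the real Paddle macros.
#include <cstddef>
#include <sstream>
#include <stdexcept>
#include <string>

namespace demo {

inline std::runtime_error InvalidArgument(const std::string& msg) {
  return std::runtime_error("InvalidArgumentError: " + msg);
}

// Old style: CHECK_GE(a, b) aborts with almost no context.
// New style: the check carries a readable error object and reports the
// compared values plus the source location.
#define DEMO_ENFORCE_GE(a, b, error)                                      \
  do {                                                                    \
    if (!((a) >= (b))) {                                                  \
      std::ostringstream oss;                                             \
      oss << (error).what() << " (expected " << (a) << " >= " << (b)      \
          << " at " << __FILE__ << ":" << __LINE__ << ")";                \
      throw std::runtime_error(oss.str());                                \
    }                                                                     \
  } while (0)

// Hypothetical helper mirroring the graph_compiler.cc check below.
inline void CheckFuncCount(std::size_t num_funcs, std::size_t num_exprs) {
  DEMO_ENFORCE_GE(
      num_funcs,
      num_exprs,
      InvalidArgument(
          "The number of funcs should not be less than expr_pack's."));
}

}  // namespace demo

For example, demo::CheckFuncCount(1, 2) throws with the readable message plus the compared values and the source location, which is roughly the diagnostic upgrade the real PADDLE_ENFORCE_* macros provide over a bare CHECK_GE.
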
14 changes: 11 additions & 3 deletions paddle/cinn/hlir/framework/graph_compiler.cc
@@ -33,7 +33,7 @@
#include "paddle/cinn/utils/profiler.h"

#include "paddle/cinn/ast_gen_ius/tensor_group.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace hlir {
namespace framework {
@@ -412,7 +412,11 @@ std::vector<ir::LoweredFunc> GetFuncFromImpl(
VLOG(3) << "input_output_nodes.size() is: " << input_output_nodes.size()
<< ", all_arg_tensors.size() is: " << all_arg_tensors.size();
std::vector<ir::LoweredFunc> funcs_after_schedule;
CHECK_GE(funcs.size(), expr_pack.size());
PADDLE_ENFORCE_GE(
funcs.size(),
expr_pack.size(),
phi::errors::InvalidArgument(
"The number of funcs should not less than expr_pack's"));
if (funcs.size() > expr_pack.size() ||
all_arg_tensors.size() > input_output_nodes.size()) {
for (int i = 0; i < funcs.size(); i++) {
@@ -432,7 +436,11 @@ std::vector<ir::LoweredFunc> GetFuncFromImpl(
PADDLE_THROW(phi::errors::InvalidArgument(
"The number of funcs should not less than expr_pack's"));
}
CHECK_EQ(funcs_after_schedule.size(), expr_pack.size());
PADDLE_ENFORCE_EQ(funcs_after_schedule.size(),
expr_pack.size(),
phi::errors::InvalidArgument(
"The number of funcs after schedule should be equal to "
"expr_pack's"));
std::vector<ir::LoweredFunc> res;
for (int i = 0; i < funcs_after_schedule.size(); i++) {
cinn::common::DefaultDeviceTarget().arch.Match(
18 changes: 13 additions & 5 deletions paddle/cinn/hlir/framework/instruction.cc
@@ -21,7 +21,7 @@
#include "paddle/cinn/hlir/framework/accuracy_checker.h"
#include "paddle/cinn/runtime/flags.h"
#include "paddle/cinn/utils/profiler.h"

#include "paddle/common/enforce.h"
PD_DECLARE_bool(cinn_sync_run);
PD_DECLARE_string(cinn_self_check_accuracy);

@@ -90,8 +90,10 @@ void Instruction::UpdateArgsCache(

if (name2podargs != nullptr) {
for (const auto& arg : all_args) {
CHECK_NE(name2podargs->count(arg), 0)
<< "Argument [" << arg << "] not found in the name2podargs";
PADDLE_ENFORCE_NE(name2podargs->count(arg),
0,
phi::errors::InvalidArgument(
"Argument not found in the name2podargs"));
VLOG(5) << "Get a argument, name=" << arg
<< ",type_code=" << name2podargs->at(arg).type_code();
builder.Add(name2podargs->at(arg));
@@ -354,11 +356,17 @@ void Instruction::Run(
pod_args[1],
static_cast<cudaStream_t>(stream));
} else if (function_name_ == "softmax") {
CHECK_EQ(pod_args.size(), 3);
PADDLE_ENFORCE_EQ(pod_args.size(),
3,
phi::errors::InvalidArgument(
"The pod_args size of softmax should be 3"));
runtime::cuda::cinn_gpu_cudnn_softmax(
attrs, pod_args[0], pod_args[1], static_cast<cudaStream_t>(stream));
} else if (function_name_ == "mul") {
CHECK_EQ(pod_args.size(), 4);
PADDLE_ENFORCE_EQ(
pod_args.size(),
4,
phi::errors::InvalidArgument("The pod_args size of mul should be 4"));
runtime::cuda::cinn_gpu_cublas_mul(attrs,
pod_args[0],
pod_args[1],
21 changes: 17 additions & 4 deletions paddle/cinn/hlir/framework/instruction.h
@@ -26,7 +26,7 @@
#endif
#include "paddle/cinn/utils/string.h"
#include "paddle/cinn/utils/timer.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace hlir {
namespace framework {
@@ -89,16 +89,29 @@ class Instruction {

void PreRun(
const std::map<std::string, cinn_pod_value_t>* name2podargs = nullptr) {
CHECK_EQ(fn_ptrs_.size(), 4);
PADDLE_ENFORCE_EQ(
fn_ptrs_.size(),
4,
phi::errors::InvalidArgument("The number of functions should be 4"));
if (fn_ptrs_.size() > 1 && fn_ptrs_.size() != in_args_.size()) {
out_args_.back()[0] = out_args_.front()[0];
out_args_.erase(out_args_.begin());
in_args_.erase(in_args_.begin());
}
UpdateArgsCache(name2podargs);

CHECK_EQ(fn_ptrs_.size(), in_args_.size());
CHECK_EQ(fn_ptrs_.size(), out_args_.size());
PADDLE_ENFORCE_EQ(
fn_ptrs_.size(),
in_args_.size(),
phi::errors::InvalidArgument(
"The number of functions should be equal to the number of "
"in_args"));
PADDLE_ENFORCE_EQ(
fn_ptrs_.size(),
out_args_.size(),
phi::errors::InvalidArgument(
"The number of functions should be equal to the number of "
"out_args"));

int flag = -1;
void* stream = nullptr;
31 changes: 22 additions & 9 deletions paddle/cinn/hlir/framework/node.cc
@@ -14,9 +14,10 @@
#include <algorithm>

#include "paddle/cinn/common/context.h"

#include "paddle/cinn/hlir/dialect/operator/ir/symbol_bindings.h"
#include "paddle/cinn/hlir/framework/node.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace hlir {
namespace framework {
@@ -90,8 +91,12 @@ std::ostream& operator<<(std::ostream& os, const NodeAttr& node_attr) {
bool edge_index_compare(
const cinn::common::Shared<cinn::common::GraphEdge>& a,
const cinn::common::Shared<cinn::common::GraphEdge>& b) {
CHECK_NOTNULL(a.get());
CHECK_NOTNULL(b.get());
PADDLE_ENFORCE_NOT_NULL(
a.get(),
phi::errors::InvalidArgument("The input edge should not be nullptr."));
PADDLE_ENFORCE_NOT_NULL(
b.get(),
phi::errors::InvalidArgument("The input edge should not be nullptr."));
return a->index() < b->index();
}

@@ -100,9 +105,13 @@ Node::inlinks_in_order() const {
std::vector<cinn::common::Shared<cinn::common::GraphEdge>> ordered_links;
for (auto& in_edge : this->inlinks()) {
ordered_links.push_back(in_edge);
CHECK_GE(in_edge->index(), 0)
<< "The index of a node's inlinks should be >= 0! Now index is: "
<< in_edge->index() << ". Please check.";
PADDLE_ENFORCE_GE(
in_edge->index(),
0,
phi::errors::InvalidArgument(
"The index of a node's inlinks should be >= 0! Now index is: %d. "
"Please check.",
in_edge->index()));
}
std::sort(ordered_links.begin(), ordered_links.end(), edge_index_compare);
return ordered_links;
@@ -113,9 +122,13 @@ Node::outlinks_in_order() const {
std::vector<cinn::common::Shared<cinn::common::GraphEdge>> ordered_links;
for (auto& out_edge : this->outlinks()) {
ordered_links.push_back(out_edge);
CHECK_GE(out_edge->index(), 0)
<< "The index of a node's outlinks should be >= 0! Now index is: "
<< out_edge->index() << ". Please check.";
PADDLE_ENFORCE_GE(
out_edge->index(),
0,
phi::errors::InvalidArgument(
"The index of a node's outlinks should be >= 0! Now index is: %d. "
"Please check.",
out_edge->index()));
}
std::sort(ordered_links.begin(), ordered_links.end(), edge_index_compare);
return ordered_links;
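
The node.cc hunks above also show that the new messages can interpolate runtime values through printf-style placeholders (the %d arguments). Below is a small self-contained sketch of that formatting idea; demo::Format and CheckEdgeIndex are hypothetical helpers, while the real code goes through phi::errors::InvalidArgument("... %d ...", value).

// Sketch of the formatted-message variant used in the node.cc hunks above,
// where the message carries the offending value via a %d placeholder.
// demo::Format and CheckEdgeIndex are hypothetical helpers, not Paddle APIs.
#include <cstdio>
#include <stdexcept>
#include <string>

namespace demo {

template <typename... Args>
std::string Format(const char* fmt, Args... args) {
  char buf[256];
  std::snprintf(buf, sizeof(buf), fmt, args...);
  return std::string(buf);
}

// Mirrors the inlinks_in_order() check: reject negative edge indices and
// report the bad value in the error message.
inline void CheckEdgeIndex(int index) {
  if (index < 0) {
    throw std::invalid_argument(
        Format("The index of a node's inlinks should be >= 0! Now index is: "
               "%d. Please check.",
               index));
  }
}

}  // namespace demo
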
9 changes: 5 additions & 4 deletions paddle/cinn/hlir/framework/op.h
@@ -28,7 +28,7 @@
#include "paddle/cinn/common/macros.h"
#include "paddle/cinn/utils/registry.h"
#include "paddle/cinn/utils/type_defs.h"

#include "paddle/common/enforce.h"
template <typename R, typename... Args>
inline auto MakeOpFunction(R (*func)(Args...)) {
return std::function<R(Args...)>(func);
@@ -209,9 +209,10 @@ template <typename ValueType>
const ValueType& OpValueType<ValueType>::operator[](const Operator* op) const {
CHECK(op) << "The input op is nullptr and it is invalid! Please check again.";
const uint32_t idx = op->index;
CHECK_LT(idx, data.size())
<< "Attribute " << attr_name << " has not been registered for Operator "
<< op->name;
PADDLE_ENFORCE_LT(idx,
data.size(),
phi::errors::InvalidArgument(
"Attribute has not been registered for Operator"));
return data[idx];
}

7 changes: 5 additions & 2 deletions paddle/cinn/hlir/framework/op_lowering_test.cc
@@ -24,7 +24,7 @@
#include "paddle/cinn/backends/nvrtc/nvrtc_util.h"
#include "paddle/cinn/common/target.h"
#include "paddle/cinn/frontend/decomposer/test_helper.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace hlir {
namespace framework {
@@ -75,7 +75,10 @@ void Compile(NetBuilder& net_builder) { // NOLINT
auto op_lowerer = CreateOpLowerer(dtype_dict, shape_dict, target);
for (auto& fusion_op : graph->fusion_groups) {
auto lowered_func = op_lowerer.Lower(fusion_op);
CHECK_EQ(lowered_func.size(), 1);
PADDLE_ENFORCE_EQ(
lowered_func.size(),
1,
phi::errors::InvalidArgument("The size of lowered_func should be 1."));
CodeGen(lowered_func[0]);
}
}
46 changes: 36 additions & 10 deletions paddle/cinn/hlir/framework/op_lowering_util.cc
@@ -21,7 +21,7 @@
#include "paddle/cinn/common/float16.h"
#endif
#include <queue>

#include "paddle/common/enforce.h"
namespace cinn {
namespace hlir {
namespace framework {
@@ -198,7 +198,9 @@ std::vector<int> GetInputShape(
<< node->id() << "\"";

auto* producer_data = in_links.front()->source()->safe_as<NodeData>();
CHECK_NOTNULL(producer_data);
PADDLE_ENFORCE_NOT_NULL(producer_data,
phi::errors::InvalidArgument(
"The input node data should not be nullptr."));
return shape_dict.at(producer_data->id());
}

@@ -585,7 +587,10 @@ void LoopOrderAssignReduce(ir::IRSchedule& ir_sch, // NOLINT
ir_sch.Split(loops[index], {-1, idx});
break;
}
CHECK_GT(idx, 1);
PADDLE_ENFORCE_GT(idx,
1,
phi::errors::InvalidArgument(
"Cannot find the split factor for loop."));
}
}

@@ -777,7 +782,11 @@ void LoopAssignReduceWithLast(ir::IRSchedule& ir_sch, // NOLINT
--idx;
} while (idx >= max_num_threads / 2);
// If the extent can't be divided by (1024, 512), the loops shouldn't be fused.
CHECK_GE(idx, max_num_threads / 2) << "Check bounds exist, can't fuse!";
PADDLE_ENFORCE_GE(
idx,
max_num_threads / 2,
phi::errors::InvalidArgument(
"Error! Can't be divide by(1024, 512), Please check!"));
} else {
int axis = axes[index];
int prefix = inshape[axis];
Expand All @@ -788,8 +797,11 @@ void LoopAssignReduceWithLast(ir::IRSchedule& ir_sch, // NOLINT
ir_sch.Split(block_name, axis, {-1, idx});
break;
}
CHECK_GT(idx, (max_num_threads / 2) / tail)
<< "Error, it's shouldn't fuse!";
PADDLE_ENFORCE_GT(
idx,
(max_num_threads / 2) / tail,
phi::errors::InvalidArgument(
"Error! Can't be divide by(1024, 512), Please check!"));
}
}
LoopOrderAssignReduce(ir_sch, block_name, first_axes, target);
@@ -1227,8 +1239,16 @@ void MergeLoops(ir::Expr root,
if (index < 0) {
return;
}
CHECK_GT(src.size(), index) << "\nindex -> " << index << "\n" << src[0];
CHECK_GT(dst.size(), index) << "\nindex -> " << index << "\n" << dst[0];
PADDLE_ENFORCE_GT(
src.size(),
index,
phi::errors::InvalidArgument(
"The index of src should be less than the size of src."));
PADDLE_ENFORCE_GT(
dst.size(),
index,
phi::errors::InvalidArgument(
"The index of dst should be less than the size of dst."));

if (src[0] == dst[0]) {
return;
@@ -1393,7 +1413,10 @@ void MergeReduceToReduce(
auto n_loops = ir_sch.GetLoops(n_tensor->name + "__reduce_init");
auto m_loops = ir_sch.GetLoops(m_tensor->name + "__reduce_init");

CHECK_EQ(n_loops.size(), m_loops.size());
PADDLE_ENFORCE_EQ(n_loops.size(),
m_loops.size(),
phi::errors::InvalidArgument(
"The size of loops should be equal."));
MergeLoops(ir_sch.GetModule().GetExprs().at(0),
n_loops,
m_loops,
@@ -1472,7 +1495,10 @@ void MergeReduceToReduce(

auto n_loops = ir_sch.GetLoops(n_tensor->name);
auto m_loops = ir_sch.GetLoops(m_tensor->name);
CHECK_EQ(n_loops.size(), m_loops.size());
PADDLE_ENFORCE_EQ(
n_loops.size(),
m_loops.size(),
phi::errors::InvalidArgument("The size of loops should be equal."));

std::vector<ir::Var> src_vars;
std::vector<ir::Expr> dst_vars;
11 changes: 7 additions & 4 deletions paddle/cinn/hlir/framework/pass.cc
@@ -16,7 +16,7 @@

#include "paddle/cinn/hlir/framework/visualize_helper.h"
#include "paddle/cinn/hlir/pass/use_pass.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace hlir {
namespace framework {
@@ -32,9 +32,12 @@ void ApplyPasses(Graph* g, const std::vector<std::string>& passes) {
for (auto* r : fpass) {
cinn::hlir::framework::PassPrinter::GetInstance()->PassBegin(r->name, g);
for (auto& dep : r->graph_attr_dependency) {
CHECK_NE(g->attrs.count(dep), 0)
<< "To apply pass [" << r->name << "], Graph's attribute [" << dep
<< "] is required, but it is not available.";
PADDLE_ENFORCE_NE(
g->attrs.count(dep),
0,
phi::errors::InvalidArgument("To apply pass, Graph's attribute is "
"required, but it is not available."));

if (g->attrs.count(dep) == 0) {
auto* pass_dep = FindPassDep(dep);
CHECK(!pass_dep) << "And the attribute is provided by pass ["
7 changes: 5 additions & 2 deletions paddle/cinn/hlir/framework/pir/compilation_task.cc
@@ -18,7 +18,6 @@
#include "paddle/cinn/common/target.h"
#include "paddle/cinn/hlir/framework/op_lowering.h"
#include "paddle/common/enforce.h"

namespace cinn {
namespace hlir {
namespace framework {
@@ -61,7 +60,11 @@ std::shared_ptr<pir::CompilationResult> CompilationTask::CodegenAndJit() {
std::shared_ptr<pir::CompilationResult> CompilationTask::CodegenAndJit() {
ir::Module::Builder builder(cinn::common::UniqName("module"),
context_->target_);
CHECK_EQ(context_->predicates_.size(), context_->lowered_funcs_.size());
PADDLE_ENFORCE_EQ(context_->predicates_.size(),
context_->lowered_funcs_.size(),
phi::errors::InvalidArgument(
"The size of predicates and lowered_funcs should be "
"the same."));
for (const ir::Expr& predicate : context_->predicates_) {
builder.AddPredicate(predicate);
}
8 changes: 5 additions & 3 deletions paddle/cinn/hlir/framework/pir/group.cc
@@ -13,7 +13,7 @@
// limitations under the License.

#include "paddle/cinn/hlir/framework/pir/group.h"

#include "paddle/common/enforce.h"
namespace cinn {
namespace hlir {
namespace framework {
@@ -22,8 +22,10 @@ namespace pir {
std::shared_ptr<Group> Group::Clone(::pir::Block* target_block,
::pir::IrMapping& ir_mapping,
const Options& option) const {
CHECK_EQ(option.OnlyCloneOps(), true)
<< "Only Support Clone Group ops information.";
PADDLE_ENFORCE_EQ(option.OnlyCloneOps(),
true,
phi::errors::InvalidArgument(
"OnlyCloneOps is the only supported option."));
std::vector<::pir::Operation*> new_ops;
// Mapper from original to new ops.
std::unordered_map<::pir::Operation*, ::pir::Operation*> ops_mapper;