Commit

[skip ci] enforce code format
taichi-gardener committed May 13, 2020
1 parent 3d7424b commit f284688
Showing 14 changed files with 98 additions and 94 deletions.
6 changes: 3 additions & 3 deletions docs/hello.rst
@@ -55,9 +55,9 @@ Let's dive into this simple Taichi program.

import taichi as ti
-------------------
Taichi is a domain-specific language (DSL) embedded in Python. To make Taichi as easy to use as a Python package,
we have done heavy engineering with this goal in mind - letting every Python programmer write Taichi code with
minimal learning effort. You can even use your favorite Python package management system, Python IDEs and other
Python packages in conjunction with Taichi.
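
A minimal sketch of what that looks like in practice (names are illustrative; this uses the current ``ti.field`` API, where code of this commit's era used ``ti.var``):

    import taichi as ti

    ti.init(arch=ti.cpu)                 # pick a backend; ti.gpu also works where available

    x = ti.field(dtype=ti.f32, shape=8)  # a 1-D field of 8 floats

    @ti.kernel
    def fill():
        for i in x:                      # parallel struct-for over the field
            x[i] = i * 2.0

    fill()
    print(x.to_numpy())                  # [ 0.  2.  4. ... 14.]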

Portability
2 changes: 1 addition & 1 deletion docs/utilities.rst
@@ -58,7 +58,7 @@ For example, this is part of the output by ``ti regression`` after enabling cons
The suggested workflow for **the PR author** to run the regression tests is:

* When a performance-related PR is ready, check out that PR locally.

* Run ``ti benchmark && ti regression`` to obtain the result.

* Decide whether to approve or request changes, depending on the result.
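
Sketching that workflow end to end in Python, mirroring how ``main.py`` shells out elsewhere in this commit (the PR number and branch name here are hypothetical):

    import os

    # Hypothetical PR: fetch and check out the branch locally
    os.system('git fetch origin pull/1234/head:pr-1234 && git checkout pr-1234')

    # Benchmark the checked-out code, then compare against the baseline
    os.system('ti benchmark && ti regression')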
14 changes: 8 additions & 6 deletions python/taichi/main.py
@@ -85,8 +85,10 @@ def display_benchmark_regression(xd, yd):
def parse_dat(file):
dict = {}
for line in open(file).readlines():
try: a, b = line.strip().split(':')
except: continue
try:
a, b = line.strip().split(':')
except:
continue
dict[a.strip()] = int(float(b))
return dict
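
For reference, ``parse_dat`` reduces a ``name: value`` benchmark file to a dict of ints (truncating through ``float``), silently skipping lines that don't split on ``:``. A hypothetical input and result:

    # bench.dat (hypothetical contents):
    #   instructions: 1.2e6
    #   loads: 340000
    parse_dat('bench.dat')   # -> {'instructions': 1200000, 'loads': 340000}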

@@ -267,7 +269,9 @@ def main(debug=False):
os.chdir(baseline_dir)
print('[benchmark] pushing baseline data...')
os.system('git add .')
os.system(f"git commit -m 'update baseline for taichi@{ti.core.get_commit_hash()}'")
os.system(
f"git commit -m 'update baseline for taichi@{ti.core.get_commit_hash()}'"
)
os.system('git push')
os.chdir(old_cwd)
print('[benchmark] baseline data uploaded')
@@ -279,9 +283,7 @@
print('[benchmark] fetching latest baseline...')
os.system('git pull')
os.chdir(old_cwd)
display_benchmark_regression(
baseline_dir,
output_dir)
display_benchmark_regression(baseline_dir, output_dir)
elif mode == "build":
ti.core.build()
elif mode == "format":
30 changes: 16 additions & 14 deletions taichi/analysis/verify.cpp
@@ -78,12 +78,12 @@ class IRVerifier : public BasicStmtVisitor {
TI_ASSERT(stmt->loop);
if (stmt->loop->is<OffloadedStmt>()) {
TI_ASSERT(stmt->loop->as<OffloadedStmt>()->task_type ==
OffloadedStmt::TaskType::struct_for ||
stmt->loop->as<OffloadedStmt>()->task_type ==
OffloadedStmt::TaskType::range_for);
OffloadedStmt::TaskType::struct_for ||
stmt->loop->as<OffloadedStmt>()->task_type ==
OffloadedStmt::TaskType::range_for);
} else {
TI_ASSERT(stmt->loop->is<StructForStmt>() ||
stmt->loop->is<RangeForStmt>());
stmt->loop->is<RangeForStmt>());
}
}

@@ -92,16 +92,18 @@ class IRVerifier : public BasicStmtVisitor {
if (for_stmt->loop_var) {
TI_ASSERT(for_stmt->loop_var->is<AllocaStmt>());
TI_ASSERT_INFO(irpass::analysis::gather_statements(
for_stmt->loop_var->parent,
[&](Stmt *s) {
if (auto store = s->cast<LocalStoreStmt>())
return store->ptr == for_stmt->loop_var;
else if (auto atomic = s->cast<AtomicOpStmt>()) {
return atomic->dest == for_stmt->loop_var;
} else {
return false;
}
}).empty(), "loop_var of {} modified", for_stmt->id);
for_stmt->loop_var->parent,
[&](Stmt *s) {
if (auto store = s->cast<LocalStoreStmt>())
return store->ptr == for_stmt->loop_var;
else if (auto atomic = s->cast<AtomicOpStmt>()) {
return atomic->dest == for_stmt->loop_var;
} else {
return false;
}
})
.empty(),
"loop_var of {} modified", for_stmt->id);
}
for_stmt->body->accept(this);
}
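
In effect this rejects any IR in which the loop variable is written inside the loop body, whether by a plain local store or an atomic. At the Python frontend level the forbidden pattern would look something like the following (a hypothetical sketch; the frontend may also catch this earlier):

    @ti.kernel
    def bad():
        for i in range(10):
            i = i + 1   # writes the loop variable; the verifier asserts on this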
12 changes: 5 additions & 7 deletions taichi/backends/metal/codegen_metal.cpp
@@ -454,17 +454,15 @@ class KernelCodegen : public IRVisitor {
TI_ASSERT(for_stmt->width() == 1);
auto loop_var_name = for_stmt->raw_name();
if (!for_stmt->reversed) {
emit("for (int {}_ = {}; {}_ < {}; {}_ = {}_ + {}) {{",
loop_var_name, for_stmt->begin->raw_name(),
loop_var_name, for_stmt->end->raw_name(),
loop_var_name, loop_var_name, 1);
emit("for (int {}_ = {}; {}_ < {}; {}_ = {}_ + {}) {{", loop_var_name,
for_stmt->begin->raw_name(), loop_var_name,
for_stmt->end->raw_name(), loop_var_name, loop_var_name, 1);
emit(" int {} = {}_;", loop_var_name, loop_var_name);
} else {
// reversed for loop
emit("for (int {}_ = {} - 1; {}_ >= {}; {}_ = {}_ - {}) {{",
loop_var_name, for_stmt->end->raw_name(),
loop_var_name, for_stmt->begin->raw_name(),
loop_var_name, loop_var_name, 1);
loop_var_name, for_stmt->end->raw_name(), loop_var_name,
for_stmt->begin->raw_name(), loop_var_name, loop_var_name, 1);
emit(" int {} = {}_;", loop_var_name, loop_var_name);
}
for_stmt->body->accept(this);
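
Substituting a loop variable ``i`` with bounds ``b`` and ``e`` into these format strings, the forward case emits roughly:

    for (int i_ = b; i_ < e; i_ = i_ + 1) {
      int i = i_;
      ...
    }

The extra ``i_`` shadow keeps the iteration counter separate from the user-visible ``i`` declared inside the body.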
5 changes: 3 additions & 2 deletions taichi/backends/metal/shaders/helpers.metal.h
@@ -33,8 +33,9 @@ STR(

inline int ifloordiv(int lhs, int rhs) {
const int intm = (lhs / rhs);
return (((lhs < 0) != (rhs < 0) && lhs &&
(rhs * intm != lhs)) ? (intm - 1) : intm);
return (((lhs < 0) != (rhs < 0) && lhs && (rhs * intm != lhs))
? (intm - 1)
: intm);
}
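
``ifloordiv`` corrects the C-style truncated quotient to a floored one (rounding toward negative infinity) whenever the operands have opposite signs and the division is inexact. The same rule, checked in a small Python sketch:

    def ifloordiv(lhs, rhs):
        intm = int(lhs / rhs)            # C-style quotient truncates toward zero
        if (lhs < 0) != (rhs < 0) and lhs != 0 and rhs * intm != lhs:
            return intm - 1              # opposite signs + inexact: floor is one lower
        return intm

    assert ifloordiv(-5, 2) == -3        # truncation alone would give -2
    assert ifloordiv(5, 2) == 2
    assert ifloordiv(-4, 2) == -2        # exact division needs no correction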

int32_t pow_i32(int32_t x, int32_t n) {
43 changes: 21 additions & 22 deletions taichi/backends/opengl/codegen_opengl.cpp
@@ -414,14 +414,13 @@ class KernelGen : public IRVisitor {
emit("{} {} = atan({}, {});", dt_name, bin_name, lhs_name, rhs_name);
}
return;
} else if (bin->op_type == BinaryOpType::pow
&& is_integral(bin->rhs->element_type())) {
// The GLSL `pow` is not so precise for `int`... e.g.: `pow(5, 3)` gives 124.
// So we have to use some hack to make it precise.
// Discussion: https://github.com/taichi-dev/taichi/pull/943#issuecomment-626354902
} else if (bin->op_type == BinaryOpType::pow &&
is_integral(bin->rhs->element_type())) {
// The GLSL `pow` is not so precise for `int`... e.g.: `pow(5, 3)` gives
// 124, so we have to use some hack to make it precise. Discussion:
// https://github.com/taichi-dev/taichi/pull/943#issuecomment-626354902
emit("{} {} = {}(fast_pow_{}({}, {}));", dt_name, bin_name, dt_name,
data_type_short_name(bin->lhs->element_type()),
lhs_name, rhs_name);
data_type_short_name(bin->lhs->element_type()), lhs_name, rhs_name);
used.fast_pow = true;
return;
}
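
The emitted ``fast_pow_*`` helper itself is defined elsewhere in this backend; as an assumption about its shape, an exact integer power is classically computed by exponentiation by squaring, sketched here in Python:

    # Assumption: fast_pow_* does something equivalent to this; the actual
    # GLSL helper is not part of this diff.
    def fast_pow(base, exp):
        result = 1
        while exp > 0:
            if exp & 1:
                result *= base   # fold in the current bit of the exponent
            base *= base         # square for the next bit
            exp >>= 1
        return result

    assert fast_pow(5, 3) == 125  # where GLSL pow(5, 3) can come back as 124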
@@ -603,9 +602,9 @@

void visit(LoopIndexStmt *stmt) override {
TI_ASSERT(stmt->loop->is<RangeForStmt>() ||
(stmt->loop->is<OffloadedStmt>() &&
stmt->loop->as<OffloadedStmt>()->task_type ==
OffloadedStmt::TaskType::range_for));
(stmt->loop->is<OffloadedStmt>() &&
stmt->loop->as<OffloadedStmt>()->task_type ==
OffloadedStmt::TaskType::range_for));
TI_ASSERT(stmt->index == 0); // TODO: multiple indices
emit("int {} = _itv;", stmt->short_name());
}
@@ -614,17 +613,15 @@
TI_ASSERT(for_stmt->width() == 1);
auto loop_var_name = for_stmt->raw_name();
if (!for_stmt->reversed) {
emit("for (int {}_ = {}; {}_ < {}; {}_ = {}_ + {}) {{",
loop_var_name, for_stmt->begin->raw_name(),
loop_var_name, for_stmt->end->raw_name(),
loop_var_name, loop_var_name, 1);
emit("for (int {}_ = {}; {}_ < {}; {}_ = {}_ + {}) {{", loop_var_name,
for_stmt->begin->raw_name(), loop_var_name,
for_stmt->end->raw_name(), loop_var_name, loop_var_name, 1);
emit(" int {} = {}_;", loop_var_name, loop_var_name);
} else {
// reversed for loop
emit("for (int {}_ = {} - 1; {}_ >= {}; {}_ = {}_ - {}) {{",
loop_var_name, for_stmt->end->raw_name(),
loop_var_name, for_stmt->begin->raw_name(),
loop_var_name, loop_var_name, 1);
loop_var_name, for_stmt->end->raw_name(), loop_var_name,
for_stmt->begin->raw_name(), loop_var_name, loop_var_name, 1);
emit(" int {} = {}_;", loop_var_name, loop_var_name);
}
for_stmt->body->accept(this);
@@ -722,12 +719,14 @@ void OpenglCodeGen::lower() {
auto ir = kernel_->ir;
auto &config = kernel_->program.config;
config.demote_dense_struct_fors = true;
auto res = irpass::compile_to_offloads(ir, config,
/*vectorize=*/false, kernel_->grad,
/*ad_use_stack=*/false, config.print_ir,
/*lower_global_access*/true);
auto res =
irpass::compile_to_offloads(ir, config,
/*vectorize=*/false, kernel_->grad,
/*ad_use_stack=*/false, config.print_ir,
/*lower_global_access*/ true);
global_tmps_buffer_size_ = res.total_size;
TI_TRACE("[glsl] Global temporary buffer size {} B", global_tmps_buffer_size_);
TI_TRACE("[glsl] Global temporary buffer size {} B",
global_tmps_buffer_size_);
#ifdef _GLSL_DEBUG
irpass::print(ir);
#endif
20 changes: 9 additions & 11 deletions taichi/codegen/codegen_llvm.cpp
@@ -808,15 +808,13 @@ void CodeGenLLVM::create_naive_range_for(RangeForStmt *for_stmt) {
builder->SetInsertPoint(loop_test);
llvm::Value *cond;
if (!for_stmt->reversed) {
cond =
builder->CreateICmp(llvm::CmpInst::Predicate::ICMP_SLT,
builder->CreateLoad(loop_var),
llvm_val[for_stmt->end]);
cond = builder->CreateICmp(llvm::CmpInst::Predicate::ICMP_SLT,
builder->CreateLoad(loop_var),
llvm_val[for_stmt->end]);
} else {
cond =
builder->CreateICmp(llvm::CmpInst::Predicate::ICMP_SGE,
builder->CreateLoad(loop_var),
llvm_val[for_stmt->begin]);
cond = builder->CreateICmp(llvm::CmpInst::Predicate::ICMP_SGE,
builder->CreateLoad(loop_var),
llvm_val[for_stmt->begin]);
}
builder->CreateCondBr(cond, body, after_loop);
}
@@ -1413,13 +1411,13 @@ void CodeGenLLVM::visit(LoopIndexStmt *stmt) {
TI_ASSERT(&module->getContext() == tlctx->get_this_thread_context());
if (stmt->loop->is<OffloadedStmt>() &&
stmt->loop->as<OffloadedStmt>()->task_type ==
OffloadedStmt::TaskType::struct_for) {
OffloadedStmt::TaskType::struct_for) {
llvm_val[stmt] = builder->CreateLoad(builder->CreateGEP(
current_coordinates, {tlctx->get_constant(0), tlctx->get_constant(0),
tlctx->get_constant(stmt->index)}));
} else {
llvm_val[stmt] = builder->CreateLoad(
loop_vars_llvm[stmt->loop][stmt->index]);
llvm_val[stmt] =
builder->CreateLoad(loop_vars_llvm[stmt->loop][stmt->index]);
}
}

3 changes: 2 additions & 1 deletion taichi/ir/ir.h
@@ -835,7 +835,8 @@ class Block : public IRNode {
void insert(VecStatement &&stmt, int location = -1);
void replace_statements_in_range(int start, int end, VecStatement &&stmts);
void set_statements(VecStatement &&stmts);
void replace_with(Stmt *old_statement, std::unique_ptr<Stmt> &&new_statement,
void replace_with(Stmt *old_statement,
std::unique_ptr<Stmt> &&new_statement,
bool replace_usages = true);
void insert_before(Stmt *old_statement, VecStatement &&new_statements);
void replace_with(Stmt *old_statement,
15 changes: 7 additions & 8 deletions taichi/ir/transforms.h
@@ -49,14 +49,13 @@ void demote_dense_struct_fors(IRNode *root);
void demote_atomics(IRNode *root);
void reverse_segments(IRNode *root); // for autograd
std::unique_ptr<ScratchPads> initialize_scratch_pad(StructForStmt *root);
OffloadedResult compile_to_offloads(
IRNode *ir,
const CompileConfig &config,
bool vectorize,
bool grad,
bool ad_use_stack,
bool verbose,
bool lower_global_access = true);
OffloadedResult compile_to_offloads(IRNode *ir,
const CompileConfig &config,
bool vectorize,
bool grad,
bool ad_use_stack,
bool verbose,
bool lower_global_access = true);

} // namespace irpass

15 changes: 7 additions & 8 deletions taichi/transforms/compile_to_offloads.cpp
@@ -7,14 +7,13 @@ TLANG_NAMESPACE_BEGIN

namespace irpass {

OffloadedResult compile_to_offloads(
IRNode *ir,
const CompileConfig &config,
bool vectorize,
bool grad,
bool ad_use_stack,
bool verbose,
bool lower_global_access) {
OffloadedResult compile_to_offloads(IRNode *ir,
const CompileConfig &config,
bool vectorize,
bool grad,
bool ad_use_stack,
bool verbose,
bool lower_global_access) {
TI_AUTO_PROF;

auto print = [&](const std::string &name) {
10 changes: 6 additions & 4 deletions taichi/transforms/convert_into_loop_index.cpp
@@ -19,12 +19,14 @@ class ConvertIntoLoopIndexStmt : public BasicStmtVisitor {
[&](Stmt *load) {
if (auto local_load = load->cast<LocalLoadStmt>()) {
return local_load->width() == 1 &&
local_load->ptr[0].var == loop_var &&
local_load->ptr[0].offset == 0;
local_load->ptr[0].var == loop_var &&
local_load->ptr[0].offset == 0;
}
return false;
},
[&]() { return Stmt::make<LoopIndexStmt>(loop, index, is_struct_for); });
[&]() {
return Stmt::make<LoopIndexStmt>(loop, index, is_struct_for);
});
}

void preprocess_container_stmt(Stmt *stmt) override {
@@ -35,7 +37,7 @@ class ConvertIntoLoopIndexStmt : public BasicStmtVisitor {
auto leaf = struct_for->snode;
for (int i = 0; i < (int)struct_for->loop_vars.size(); i++) {
convert(struct_for, struct_for->loop_vars[i],
leaf->physical_index_position[i], true);
leaf->physical_index_position[i], true);
struct_for->loop_vars[i] = nullptr;
}
}
3 changes: 2 additions & 1 deletion taichi/transforms/demote_dense_struct_fors.cpp
@@ -84,7 +84,8 @@ VecStatement convert_to_range_for(StructForStmt *struct_for) {
[&](Stmt *s) {
if (auto loop_index = s->cast<LoopIndexStmt>()) {
return loop_index->loop == struct_for &&
loop_index->index == snodes.back()->physical_index_position[i];
loop_index->index ==
snodes.back()->physical_index_position[i];
}
return false;
},
14 changes: 8 additions & 6 deletions taichi/transforms/ir_printer.cpp
@@ -259,17 +259,18 @@ class IRPrinter : public IRVisitor {

void visit(RangeForStmt *for_stmt) override {
print("{} : {}for {} in range({}, {}, step {}) {{", for_stmt->name(),
for_stmt->reversed ? "reversed " : "", for_stmt->loop_var ?
for_stmt->loop_var->name() : "nullptr",
for_stmt->reversed ? "reversed " : "",
for_stmt->loop_var ? for_stmt->loop_var->name() : "nullptr",
for_stmt->begin->name(), for_stmt->end->name(), for_stmt->vectorize);
for_stmt->body->accept(this);
print("}}");
}

void visit(StructForStmt *for_stmt) override {
auto loop_vars = make_list<Stmt *>(
for_stmt->loop_vars,
[](Stmt *const &stmt) -> std::string { return stmt ? stmt->name() : "nullptr"; });
auto loop_vars = make_list<Stmt *>(for_stmt->loop_vars,
[](Stmt *const &stmt) -> std::string {
return stmt ? stmt->name() : "nullptr";
});
print("{} : for {} where {} active, step {} {{", for_stmt->name(),
loop_vars, for_stmt->snode->get_node_type_name_hinted(),
for_stmt->vectorize);
@@ -459,7 +460,8 @@
}

void visit(LoopIndexStmt *stmt) override {
print("{}{} = loop {} index {}", stmt->type_hint(), stmt->name(), stmt->loop->name(), stmt->index);
print("{}{} = loop {} index {}", stmt->type_hint(), stmt->name(),
stmt->loop->name(), stmt->index);
}

void visit(GlobalTemporaryStmt *stmt) override {