Skip to content

Commit

Permalink
[skip ci] enforce code format
Browse files Browse the repository at this point in the history
  • Loading branch information
taichi-gardener committed May 3, 2020
1 parent cd8e7c2 commit eb7bdc1
Show file tree
Hide file tree
Showing 12 changed files with 56 additions and 37 deletions.
2 changes: 1 addition & 1 deletion docs/gui.rst
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,7 @@ An *event filter* is a list composed of *key*, *type* and *(type, key)* tuples, e.
:parameter gui: (GUI)
:parameter key: (EventKey) keys you want to detect
:return: (bool) ``True`` if one of the keys is pressed, ``False`` otherwise

.. warning::

Must be used together with ``gui.get_event``, or it won't be updated!
Expand Down
12 changes: 6 additions & 6 deletions docs/snode.rst
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,11 @@ See :ref:`layout` for more details about data layout.
:parameter snode: (SNode)
:parameter index: along which axis? (0 for ``i`` and 1 for ``j``)
:return: (scalar) the size of the tensor along that axis

Equivalent to snode.shape[i]

::

ti.root.dense(ti.ijk, (3, 5, 4)).place(x)
x.get_shape(0) # 3
x.get_shape(1) # 5
Expand All @@ -44,11 +44,11 @@ See :ref:`layout` for more details about data layout.

:parameter snode: (SNode or tensor)
:return: (scalar) the dimensionality of the node / tensor

Equivalent to `len(snode.shape)`.

::

ti.root.dense(ti.ijk, (8, 9, 10)).place(x)
x.dim() # 3

Expand Down
20 changes: 17 additions & 3 deletions misc/ci_setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,9 +172,23 @@ def run(self):
execute_command('rm get-pip.py')

subprocess.run([
get_python_executable(), "-m", "pip", "install", "--user",
"colorama", "numpy", "Pillow", "scipy", "pybind11", "GitPython",
"yapf", "distro", "pytest", "autograd", "astor", "pytest-xdist",
get_python_executable(),
"-m",
"pip",
"install",
"--user",
"colorama",
"numpy",
"Pillow",
"scipy",
"pybind11",
"GitPython",
"yapf",
"distro",
"pytest",
"autograd",
"astor",
"pytest-xdist",
"pytest-rerunfailures",
])
print("importing numpy test:")
Expand Down
1 change: 1 addition & 0 deletions python/taichi/lang/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ def cast(obj, type):
else:
return Expr(taichi_lang_core.value_cast(Expr(obj).ptr, type))


def bit_cast(obj, type):
if is_taichi_class(obj):
raise ValueError('Cannot apply bit_cast on Taichi classes')
Expand Down
10 changes: 5 additions & 5 deletions python/taichi/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,10 @@ def test_python(args):
pytest_args += ['-s', '-v']
if args.rerun:
pytest_args += ['--reruns', args.rerun]
if int(pytest.main([os.path.join(root_dir, 'misc/empty_pytest.py'),
'-n1', '-q'])) == 0: # test if pytest has xdist or not
if int(
pytest.main(
[os.path.join(root_dir, 'misc/empty_pytest.py'), '-n1',
'-q'])) == 0: # test if pytest has xdist or not
try:
from multiprocessing import cpu_count
threads = min(8, cpu_count()) # To prevent running out of memory
Expand Down Expand Up @@ -73,9 +75,7 @@ def make_argument_parser():
'--verbose',
action='store_true',
help='Run with verbose outputs')
parser.add_argument('-r',
'--rerun',
help='Rerun failed tests once again')
parser.add_argument('-r', '--rerun', help='Rerun failed tests once again')
parser.add_argument('-t',
'--threads',
help='Number of threads for parallel testing')
Expand Down
22 changes: 11 additions & 11 deletions taichi/backends/metal/codegen_metal.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -326,18 +326,18 @@ class KernelCodegen : public IRVisitor {

void visit(UnaryOpStmt *stmt) override {
if (stmt->op_type == UnaryOpType::cast_value) {
emit("const {} {} = static_cast<{}>({});",
metal_data_type_name(stmt->element_type()), stmt->raw_name(),
metal_data_type_name(stmt->cast_type), stmt->operand->raw_name());
emit("const {} {} = static_cast<{}>({});",
metal_data_type_name(stmt->element_type()), stmt->raw_name(),
metal_data_type_name(stmt->cast_type), stmt->operand->raw_name());
} else if (stmt->op_type == UnaryOpType::cast_bits) {
// reinterpret the bit pattern
const auto to_type = to_metal_type(stmt->cast_type);
const auto to_type_name = metal_data_type_name(to_type);
TI_ASSERT(metal_data_type_bytes(
to_metal_type(stmt->operand->element_type())) ==
metal_data_type_bytes(to_type));
emit("const {} {} = union_cast<{}>({});", to_type_name,
stmt->raw_name(), to_type_name, stmt->operand->raw_name());
// reinterpret the bit pattern
const auto to_type = to_metal_type(stmt->cast_type);
const auto to_type_name = metal_data_type_name(to_type);
TI_ASSERT(
metal_data_type_bytes(to_metal_type(stmt->operand->element_type())) ==
metal_data_type_bytes(to_type));
emit("const {} {} = union_cast<{}>({});", to_type_name, stmt->raw_name(),
to_type_name, stmt->operand->raw_name());
} else {
emit("const {} {} = {}({});", metal_data_type_name(stmt->element_type()),
stmt->raw_name(), metal_unary_op_type_symbol(stmt->op_type),
Expand Down
7 changes: 3 additions & 4 deletions taichi/backends/opengl/codegen_opengl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -350,12 +350,11 @@ class KernelGen : public IRVisitor {
emit("{} {} = {}(~{});", dt_name, stmt->short_name(), dt_name,
stmt->operand->short_name());
} else if (stmt->op_type == UnaryOpType::cast_value) {
emit("{} {} = {}({});", dt_name, stmt->short_name(),
opengl_data_type_name(stmt->cast_type),
stmt->operand->short_name());
emit("{} {} = {}({});", dt_name, stmt->short_name(),
opengl_data_type_name(stmt->cast_type), stmt->operand->short_name());
} else if (stmt->op_type == UnaryOpType::cast_bits) {
if (stmt->cast_type == DataType::f32 &&
stmt->operand->element_type() == DataType::i32) {
stmt->operand->element_type() == DataType::i32) {
emit("{} {} = intBitsToFloat({});", dt_name, stmt->short_name(),
stmt->operand->short_name());
} else if (stmt->cast_type == DataType::i32 &&
Expand Down
3 changes: 2 additions & 1 deletion taichi/ir/expr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,8 @@ Expr operator~(const Expr &expr) {
}

Expr cast(const Expr &input, DataType dt) {
auto ret = std::make_shared<UnaryOpExpression>(UnaryOpType::cast_value, input);
auto ret =
std::make_shared<UnaryOpExpression>(UnaryOpType::cast_value, input);
ret->cast_type = dt;
return Expr(ret);
}
Expand Down
3 changes: 2 additions & 1 deletion taichi/python/export_lang.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -326,7 +326,8 @@ void export_lang(py::module &m) {
m.def("layout", layout);

m.def("value_cast", static_cast<Expr (*)(const Expr &expr, DataType)>(cast));
m.def("bits_cast", static_cast<Expr (*)(const Expr &expr, DataType)>(bit_cast));
m.def("bits_cast",
static_cast<Expr (*)(const Expr &expr, DataType)>(bit_cast));

m.def("expr_atomic_add", [&](const Expr &a, const Expr &b) {
return Expr::make<AtomicOpExpression>(AtomicOpType::add, ptr_if_global(a),
Expand Down
4 changes: 2 additions & 2 deletions taichi/transforms/ir_printer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -127,8 +127,8 @@ class IRPrinter : public IRVisitor {

void visit(UnaryOpStmt *stmt) override {
if (stmt->is_cast()) {
std::string reint = stmt->op_type == UnaryOpType::cast_value ?
"" : "reinterpret_";
std::string reint =
stmt->op_type == UnaryOpType::cast_value ? "" : "reinterpret_";
print("{}{} = {}{}<{}> {}", stmt->type_hint(), stmt->name(), reint,
unary_op_type_name(stmt->op_type),
data_type_short_name(stmt->cast_type), stmt->operand->name());
Expand Down
3 changes: 2 additions & 1 deletion taichi/transforms/make_adjoint.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,8 @@ class MakeAdjoint : public IRVisitor {
accumulate(stmt->operand,
mul(adjoint(stmt), div(constant(0.5f), sqrt(stmt->operand))));
} else if (stmt->op_type == UnaryOpType::cast_value) {
if (is_real(stmt->cast_type) && is_real(stmt->operand->ret_type.data_type)) {
if (is_real(stmt->cast_type) &&
is_real(stmt->operand->ret_type.data_type)) {
accumulate(stmt->operand, adjoint(stmt));
}
} else if (stmt->op_type == UnaryOpType::logic_not) {
Expand Down
6 changes: 4 additions & 2 deletions taichi/transforms/type_check.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,8 @@ class TypeCheck : public IRVisitor {
Stmt *insert_type_cast_before(Stmt *anchor,
Stmt *input,
DataType output_type) {
auto &&cast_stmt = Stmt::make_typed<UnaryOpStmt>(UnaryOpType::cast_value, input);
auto &&cast_stmt =
Stmt::make_typed<UnaryOpStmt>(UnaryOpType::cast_value, input);
cast_stmt->cast_type = output_type;
cast_stmt->accept(this);
auto stmt = cast_stmt.get();
Expand All @@ -197,7 +198,8 @@ class TypeCheck : public IRVisitor {
Stmt *insert_type_cast_after(Stmt *anchor,
Stmt *input,
DataType output_type) {
auto &&cast_stmt = Stmt::make_typed<UnaryOpStmt>(UnaryOpType::cast_value, input);
auto &&cast_stmt =
Stmt::make_typed<UnaryOpStmt>(UnaryOpType::cast_value, input);
cast_stmt->cast_type = output_type;
cast_stmt->accept(this);
auto stmt = cast_stmt.get();
Expand Down

0 comments on commit eb7bdc1

Please sign in to comment.