[refactor] Some renamings (#4959)
k-ye authored May 11, 2022
1 parent e21a4e3 commit 80f20f2
Showing 6 changed files with 17 additions and 17 deletions.
2 changes: 1 addition & 1 deletion taichi/backends/cuda/codegen_cuda.cpp
@@ -70,7 +70,7 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {
continue;
}
arg_buffers[i] = context.get_arg<void *>(i);
- if (!context.is_device_allocation[i]) {
+ if (!context.is_device_allocations[i]) {
// Note: both numpy and PyTorch support arrays/tensors with zeros
// in shapes, e.g., shape=(0) or shape=(100, 0, 200). This makes
// `arr_sz` zero.
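For context on the zero-size note in the hunk above, here is a minimal standalone sketch (not Taichi code) of why a shape containing a zero makes the external array's byte size zero, which is what forces the launch path to tolerate `arr_sz == 0`:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: an array's element count is the product of its shape
// dimensions, so any zero in the shape (e.g. (100, 0, 200)) makes the whole
// buffer size zero.
std::size_t num_elements(const std::vector<std::int64_t> &shape) {
  std::size_t n = 1;
  for (auto dim : shape) {
    n *= static_cast<std::size_t>(dim);
  }
  return n;
}

// num_elements({100, 0, 200}) == 0, so the runtime size recorded in
// array_runtime_sizes would also be 0 for such an argument.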
2 changes: 1 addition & 1 deletion taichi/codegen/codegen_llvm.cpp
@@ -2296,7 +2296,7 @@ FunctionType CodeGenLLVM::compile_module_to_executable() {
// For taichi ndarrays, context.args saves a pointer to its
// |DeviceAllocation|; the CPU backend actually wants to use the raw ptr here.
for (int i = 0; i < (int)args.size(); i++) {
- if (args[i].is_array && context.is_device_allocation[i] &&
+ if (args[i].is_array && context.is_device_allocations[i] &&
context.array_runtime_sizes[i] > 0) {
DeviceAllocation *ptr =
static_cast<DeviceAllocation *>(context.get_arg<void *>(i));
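A hedged sketch of the pattern this hunk sits in: on the CPU backend the argument slot holds a DeviceAllocation*, and the launcher swaps it for a raw pointer the kernel can dereference directly. The types and the host_ptr member below are simplified stand-ins, not the real Taichi definitions:

// Hypothetical stand-ins for the real Taichi types; illustrative only.
struct DeviceAllocation {
  void *host_ptr;  // assume the CPU "device" just wraps a host pointer
};

struct FakeContext {
  void *args[8];
  bool is_device_allocations[8];
};

// Sketch of the unwrap step: if args[i] is a DeviceAllocation*, replace it
// with the raw pointer the CPU kernel will use.
void unwrap_cpu_ndarray_args(FakeContext &ctx, int num_args) {
  for (int i = 0; i < num_args; i++) {
    if (ctx.is_device_allocations[i] && ctx.args[i] != nullptr) {
      auto *alloc = static_cast<DeviceAllocation *>(ctx.args[i]);
      ctx.args[i] = alloc->host_ptr;
    }
  }
}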
8 changes: 4 additions & 4 deletions taichi/program/context.h
@@ -24,14 +24,14 @@ struct RuntimeContext {
int32 cpu_thread_id;

// Note that I've tried to group `array_runtime_size` and
- // `is_device_allocation` into a small struct. However, it caused some test
+ // `is_device_allocations` into a small struct. However, it caused some test
// cases to get stuck.

// `array_runtime_size` records the runtime size of the
// corresponding array arguments.
uint64 array_runtime_sizes[taichi_max_num_args_total]{0};
- // `is_device_allocation` is true iff args[i] is a DeviceAllocation*.
- bool is_device_allocation[taichi_max_num_args_total]{false};
+ // `is_device_allocations` is true iff i-th arg is a `DeviceAllocation*`.
+ bool is_device_allocations[taichi_max_num_args_total]{false};
// We move the pointer of the result buffer from LLVMRuntime to RuntimeContext
// because each real function needs a place to store its result, but
// LLVMRuntime is shared among functions. So we moved the pointer to
@@ -61,7 +61,7 @@ struct RuntimeContext {
}

void set_array_is_device_allocation(int i, bool is_device_allocation) {
- this->is_device_allocation[i] = is_device_allocation;
+ this->is_device_allocations[i] = is_device_allocation;
}

template <typename T>
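To make the renamed fields easier to follow outside the diff, here is a heavily simplified, standalone sketch of the two parallel per-argument arrays and the setter touched above. The real RuntimeContext carries many more members, and the bound of 8 is only a placeholder for taichi_max_num_args_total:

#include <cstdint>

constexpr int kMaxArgs = 8;  // placeholder for taichi_max_num_args_total

struct RuntimeContextSketch {
  // Runtime byte size of each array argument; 0 for scalars and empty arrays.
  std::uint64_t array_runtime_sizes[kMaxArgs]{0};
  // True iff the i-th argument slot holds a DeviceAllocation* rather than a
  // raw host pointer.
  bool is_device_allocations[kMaxArgs]{false};

  void set_array_runtime_size(int i, std::uint64_t size) {
    array_runtime_sizes[i] = size;
  }
  void set_array_is_device_allocation(int i, bool is_device_allocation) {
    is_device_allocations[i] = is_device_allocation;
  }
};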
10 changes: 5 additions & 5 deletions taichi/runtime/opengl/opengl_api.cpp
@@ -402,19 +402,19 @@ void DeviceCompiledTaichiKernel::launch(RuntimeContext &ctx,
int i = item.first;
TI_ASSERT(args[i].is_array);
const auto arr_sz = ctx.array_runtime_sizes[i];
- if (arr_sz == 0 || ctx.is_device_allocation[i]) {
+ if (arr_sz == 0 || ctx.is_device_allocations[i]) {
continue;
}
has_ext_arr = true;
- if (arr_sz != item.second.total_size ||
+ if (arr_sz != item.second.runtime_size ||
ext_arr_bufs_[i] == kDeviceNullAllocation) {
if (ext_arr_bufs_[i] != kDeviceNullAllocation) {
device_->dealloc_memory(ext_arr_bufs_[i]);
}
ext_arr_bufs_[i] = device_->allocate_memory({arr_sz, /*host_write=*/true,
/*host_read=*/true,
/*export_sharing=*/false});
- item.second.total_size = arr_sz;
+ item.second.runtime_size = arr_sz;
}
void *host_ptr = (void *)ctx.args[i];
void *baseptr = device_->map(ext_arr_bufs_[i]);
@@ -471,7 +471,7 @@ void DeviceCompiledTaichiKernel::launch(RuntimeContext &ctx,
// On most devices this number is 8. But I need to look up how
// to query this information, so currently this is thrown from OpenGL.
for (const auto [arg_id, bind_id] : program_.used.arr_arg_to_bind_idx) {
- if (ctx.is_device_allocation[arg_id]) {
+ if (ctx.is_device_allocations[arg_id]) {
DeviceAllocation *ptr =
static_cast<DeviceAllocation *>((void *)ctx.args[arg_id]);

@@ -507,7 +507,7 @@ void DeviceCompiledTaichiKernel::launch(RuntimeContext &ctx,
for (auto &item : program_.arr_args) {
int i = item.first;
const auto arr_sz = ctx.array_runtime_sizes[i];
- if (arr_sz > 0 && !ctx.is_device_allocation[i]) {
+ if (arr_sz > 0 && !ctx.is_device_allocations[i]) {
uint8_t *baseptr = (uint8_t *)device_->map(ext_arr_bufs_[i]);
memcpy((void *)ctx.args[i], baseptr, arr_sz);
device_->unmap(ext_arr_bufs_[i]);
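A hedged sketch of the staging logic the OpenGL hunks above participate in: host arrays (arguments that are not DeviceAllocations and have a non-zero runtime size) get a staging buffer that is reallocated whenever the recorded size changes, and data is copied in before launch and back out afterwards. This models buffers as byte vectors rather than the real Device API:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <unordered_map>
#include <vector>

// Illustrative sketch (not the real OpenGL runtime): per-argument staging
// buffers keyed by argument index.
struct StagingBuffers {
  std::unordered_map<int, std::vector<std::uint8_t>> bufs;

  // Mirrors the launch-time decision: skip device allocations and zero-sized
  // arrays; otherwise (re)allocate when the runtime size changed, then copy
  // the host data in.
  void upload(int i, const void *host_ptr, std::size_t arr_sz,
              bool is_device_allocation) {
    if (arr_sz == 0 || is_device_allocation) {
      return;  // nothing to stage
    }
    auto &buf = bufs[i];
    if (buf.size() != arr_sz) {
      buf.assign(arr_sz, 0);  // "reallocate" on size change
    }
    std::memcpy(buf.data(), host_ptr, arr_sz);
  }

  // After the kernel runs, copy results back to the host array.
  void download(int i, void *host_ptr, std::size_t arr_sz,
                bool is_device_allocation) {
    if (arr_sz == 0 || is_device_allocation) {
      return;
    }
    auto it = bufs.find(i);
    if (it != bufs.end()) {
      std::memcpy(host_ptr, it->second.data(), arr_sz);
    }
  }
};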
2 changes: 1 addition & 1 deletion taichi/runtime/opengl/opengl_api.h
@@ -66,7 +66,7 @@ struct CompiledArrayArg {
bool is_scalar{false};
std::vector<int> element_shape;
size_t shape_offset_in_bytes_in_args_buf{0};
- size_t total_size{0}; // Runtime information
+ size_t runtime_size{0}; // Runtime information

TI_IO_DEF(field_dim,
is_scalar,
10 changes: 5 additions & 5 deletions taichi/runtime/vulkan/runtime.cpp
@@ -79,7 +79,7 @@ class HostDeviceContextBlitter {
char *device_ptr = device_base + arg.offset_in_mem;
do {
if (arg.is_array) {
- if (!host_ctx_->is_device_allocation[i] && ext_arr_size.at(i)) {
+ if (!host_ctx_->is_device_allocations[i] && ext_arr_size.at(i)) {
// Only need to blit ext arrs (host array)
DeviceAllocation buffer = ext_arrays.at(i);
char *const device_arr_ptr =
@@ -150,7 +150,7 @@ class HostDeviceContextBlitter {
for (int i = 0; i < ctx_attribs_->args().size(); ++i) {
const auto &arg = ctx_attribs_->args()[i];
if (arg.is_array) {
- if (!host_ctx_->is_device_allocation[i] && ext_arr_size.at(i)) {
+ if (!host_ctx_->is_device_allocations[i] && ext_arr_size.at(i)) {
require_sync = true;
}
}
@@ -166,7 +166,7 @@ class HostDeviceContextBlitter {
for (int i = 0; i < ctx_attribs_->args().size(); ++i) {
const auto &arg = ctx_attribs_->args()[i];
if (arg.is_array) {
- if (!host_ctx_->is_device_allocation[i] && ext_arr_size.at(i)) {
+ if (!host_ctx_->is_device_allocations[i] && ext_arr_size.at(i)) {
// Only need to blit ext arrs (host array)
DeviceAllocation buffer = ext_arrays.at(i);
char *const device_arr_ptr =
@@ -455,7 +455,7 @@ void VkRuntime::launch_kernel(KernelHandle handle, RuntimeContext *host_ctx) {
const auto &args = ti_kernel->ti_kernel_attribs().ctx_attribs.args();
for (auto &arg : args) {
if (arg.is_array) {
- if (host_ctx->is_device_allocation[i]) {
+ if (host_ctx->is_device_allocations[i]) {
// NDArray
if (host_ctx->args[i]) {
any_arrays[i] = *(DeviceAllocation *)(host_ctx->args[i]);
@@ -546,7 +546,7 @@ void VkRuntime::launch_kernel(KernelHandle handle, RuntimeContext *host_ctx) {
// Dealloc external arrays
for (auto pair : any_arrays) {
if (pair.second != kDeviceNullAllocation) {
- if (!host_ctx->is_device_allocation[pair.first]) {
+ if (!host_ctx->is_device_allocations[pair.first]) {
device_->dealloc_memory(pair.second);
}
}
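A hedged sketch of the cleanup rule in the last Vulkan hunk: only staging allocations that the runtime created for host arrays are freed; anything the caller passed in as a DeviceAllocation* is user-owned and must not be deallocated. The device type and allocation handle below are simplified stand-ins:

#include <set>
#include <unordered_map>

// Simplified stand-ins for the real Vulkan runtime types.
using DeviceAllocationId = int;
constexpr DeviceAllocationId kDeviceNullAllocation = -1;

struct FakeDevice {
  std::set<DeviceAllocationId> live;
  void dealloc_memory(DeviceAllocationId id) { live.erase(id); }
};

// Free only runtime-owned staging buffers; user-provided device allocations
// (is_device_allocations[arg] == true) are left alone.
void cleanup_ext_arrays(
    FakeDevice &device,
    const std::unordered_map<int, DeviceAllocationId> &any_arrays,
    const bool *is_device_allocations) {
  for (const auto &pair : any_arrays) {
    if (pair.second != kDeviceNullAllocation &&
        !is_device_allocations[pair.first]) {
      device.dealloc_memory(pair.second);
    }
  }
}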
