
Commit 014940b
#11838: Remove num_cores_to_core_range_set
VirdhatchaniKN committed Sep 7, 2024
1 parent 3802793 · commit 014940b
Showing 8 changed files with 8 additions and 38 deletions.
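
This commit removes the num_cores_to_core_range_set helper end to end: the C++ wrapper in work_split.hpp, its pybind11 binding, and the Python wrapper in ttnn/core.py. Every caller switches to the surviving num_cores_to_corerange_set, so for downstream code the migration is a one-line rename per call site. A minimal sketch of the rename, using an illustrative 8x8 grid and core count that are not taken from this commit:

    import ttnn

    core_grid = ttnn.CoreCoord(8, 8)  # illustrative grid size, not from this commit

    # Before this commit (binding now removed):
    # core_ranges = ttnn.num_cores_to_core_range_set(28, core_grid, row_wise=True)

    # After this commit:
    core_ranges = ttnn.num_cores_to_corerange_set(28, core_grid, row_wise=True)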
@@ -88,7 +88,7 @@ def run(
     # TODO: row_wise=False and ROW_MAJOR shard orientation gives bad PCC
     # TODO: COL_MAJOR shard orientation doesn't work for get_matmul_program_config
     input_a_memory_config.shard_spec = ttnn.ShardSpec(
-        ttnn.num_cores_to_core_range_set(num_cores_height, core_grid, row_wise=True),
+        ttnn.num_cores_to_corerange_set(num_cores_height, core_grid, row_wise=True),
         (per_core_height, k_size),
         ttnn.ShardOrientation.ROW_MAJOR,
         False,
@@ -81,7 +81,7 @@ def run(
     # TODO: row_wise=False and ROW_MAJOR shard orientation gives bad PCC
     # TODO: COL_MAJOR shard orientation doesn't work for get_matmul_program_config
     input_a_memory_config.shard_spec = ttnn.ShardSpec(
-        ttnn.num_cores_to_core_range_set(num_cores_width, core_grid, row_wise=True),
+        ttnn.num_cores_to_corerange_set(num_cores_width, core_grid, row_wise=True),
         (total_height, per_core_width),
         ttnn.ShardOrientation.ROW_MAJOR,
         False,
@@ -77,7 +77,7 @@ class TensorMemoryConfigs(enum.Enum):
         memory_layout=ttnn.TensorMemoryLayout.WIDTH_SHARDED,
         buffer_type=ttnn.BufferType.L1,
         shard_spec=ttnn.ShardSpec(
-            ttnn.num_cores_to_core_range_set(28, core_grid, row_wise=True),
+            ttnn.num_cores_to_corerange_set(28, core_grid, row_wise=True),
             (64, IN0_INNER_DIM_PER_CORE),
             ttnn.ShardOrientation.ROW_MAJOR,
             False,
@@ -105,7 +105,7 @@ class TensorMemoryConfigs(enum.Enum):
         memory_layout=ttnn.TensorMemoryLayout.WIDTH_SHARDED,
         buffer_type=ttnn.BufferType.L1,
         shard_spec=ttnn.ShardSpec(
-            ttnn.num_cores_to_core_range_set(35, core_grid, row_wise=True),
+            ttnn.num_cores_to_corerange_set(35, core_grid, row_wise=True),
             (64, IN0_INNER_DIM_PER_CORE),
             ttnn.ShardOrientation.ROW_MAJOR,
             False,
@@ -134,7 +134,7 @@ class TensorMemoryConfigs(enum.Enum):
         memory_layout=ttnn.TensorMemoryLayout.WIDTH_SHARDED,
         buffer_type=ttnn.BufferType.L1,
         shard_spec=ttnn.ShardSpec(
-            ttnn.num_cores_to_core_range_set(28, core_grid, row_wise=True),
+            ttnn.num_cores_to_corerange_set(28, core_grid, row_wise=True),
             (64, IN0_INNER_DIM_PER_CORE),
             ttnn.ShardOrientation.ROW_MAJOR,
             False,
@@ -163,7 +163,7 @@ class TensorMemoryConfigs(enum.Enum):
         memory_layout=ttnn.TensorMemoryLayout.WIDTH_SHARDED,
         buffer_type=ttnn.BufferType.L1,
         shard_spec=ttnn.ShardSpec(
-            ttnn.num_cores_to_core_range_set(30, core_grid, row_wise=True),
+            ttnn.num_cores_to_corerange_set(30, core_grid, row_wise=True),
             (64, IN0_INNER_DIM_PER_CORE),
             ttnn.ShardOrientation.ROW_MAJOR,
             False,
6 changes: 0 additions & 6 deletions tt_metal/common/work_split.hpp
@@ -156,12 +156,6 @@ inline std::set<CoreRange> num_cores_to_corerange_set(
     return num_cores_to_corerange_set({0, 0}, target_num_cores, grid_size, row_wise);
 }
 
-// TODO: Switch num_cores_to_corerange_set to always return CoreRangeSet
-inline CoreRangeSet num_cores_to_core_range_set(
-    const uint32_t target_num_cores, const CoreCoord grid_size, const bool row_wise = false) {
-    return CoreRangeSet(num_cores_to_corerange_set({0, 0}, target_num_cores, grid_size, row_wise));
-}
-
 // This function takes in the core grid size, as well as the number of units of work to divide between the cores
 // This function returns the number of cores, the CoreRangeSet of all cores, and then the CoreRangeSet that does
 // the greater amount of work, and the CoreRangeSet that does less work if work cannot be evenly divided
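
For intuition about what the surviving helper computes: given a target core count and a grid, it covers the cores with contiguous rectangular CoreRanges, filling row by row when row_wise is true. The pure-Python sketch below only illustrates that packing idea; it is an assumption about the shape of the result, not the tt_metal implementation, which lives in work_split.hpp:

    def row_wise_core_ranges(target_num_cores: int, grid_w: int, grid_h: int):
        """Illustrative sketch: pack cores row by row into at most two rectangles."""
        assert 0 < target_num_cores <= grid_w * grid_h
        full_rows, remainder = divmod(target_num_cores, grid_w)
        ranges = []
        if full_rows > 0:
            # One rectangle spanning the complete rows.
            ranges.append(((0, 0), (grid_w - 1, full_rows - 1)))
        if remainder > 0:
            # A second rectangle for the partial row beneath them.
            ranges.append(((0, full_rows), (remainder - 1, full_rows)))
        return ranges

    # 28 cores on an 8x8 grid, row-wise: three full rows plus four cores of row 3.
    print(row_wise_core_ranges(28, 8, 8))  # [((0, 0), (7, 2)), ((0, 3), (3, 3))]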
8 changes: 0 additions & 8 deletions ttnn/cpp/pybind11/operations/core.hpp
@@ -311,14 +311,6 @@ void py_module(py::module& module) {
         py::arg("memory_config") = std::nullopt,
         py::arg("device") = nullptr});
 
-    module.def(
-        "num_cores_to_core_range_set",
-        &tt::tt_metal::num_cores_to_core_range_set,
-        py::arg("target_num_cores"),
-        py::arg("grid_size"),
-        py::arg("row_wise") = false,
-        R"doc(Returns a CoreRangeSet from number of cores)doc");
-
     module.def(
         "num_cores_to_corerange_set",
         py::overload_cast<const uint32_t, const CoreCoord, const bool>(&tt::tt_metal::num_cores_to_corerange_set),
4 changes: 2 additions & 2 deletions ttnn/cpp/ttnn/operations/conv/conv2d/conv2d.cpp
@@ -89,14 +89,14 @@ ParallelConfig determine_parallel_config(
 
     auto calculate_grid = [&](uint32_t num_cores_nhw) {
         if (shard_layout == TensorMemoryLayout::HEIGHT_SHARDED) {
-            CoreRangeSet grid = num_cores_to_core_range_set(num_cores_nhw, device_grid_size_coord, true);
+            CoreRangeSet grid = num_cores_to_corerange_set(num_cores_nhw, device_grid_size_coord, true);
             return grid;
 
         } else if(shard_layout == TensorMemoryLayout::WIDTH_SHARDED) {
             uint32_t num_cores_channels = find_closest_common_largest_divisor(
                 conv_out_2d_matrix_width_ntiles, std::ceil((double)input_channels / (double)tt::constants::TILE_WIDTH), max_num_cores);
             log_debug(LogOp, "Num cores for Width Sharding : {}", num_cores_channels);
-            CoreRangeSet grid = num_cores_to_core_range_set(num_cores_channels, device_grid_size_coord, true);
+            CoreRangeSet grid = num_cores_to_corerange_set(num_cores_channels, device_grid_size_coord, true);
             return grid;
 
         } else if(shard_layout == TensorMemoryLayout::BLOCK_SHARDED) {
1 change: 0 additions & 1 deletion ttnn/ttnn/__init__.py
@@ -205,7 +205,6 @@ def manage_config(name, value):
     dump_memory_config,
     load_memory_config,
     dump_stack_trace_on_segfault,
-    num_cores_to_core_range_set,
     num_cores_to_corerange_set,
 )
 
15 changes: 0 additions & 15 deletions ttnn/ttnn/core.py
@@ -44,21 +44,6 @@ def is_sharded(tensor) -> bool:
 get_memory_config = ttnn._ttnn.core.get_memory_config
 
 
-def num_cores_to_core_range_set(
-    target_num_cores: int,
-    grid_size: ttnn.CoreCoord,
-    row_wise: bool = False,
-):
-    """
-    Returns a CoreRangeSet from number of cores
-    """
-    return ttnn._ttnn.operations.core.num_cores_to_core_range_set(
-        target_num_cores,
-        grid_size,
-        row_wise,
-    )
-
-
 def num_cores_to_corerange_set(
     target_num_cores: int,
     grid_size: ttnn.CoreCoord,
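
For reference, a sketch of how the surviving wrapper is typically consumed, modeled on the width-sharded test configs above. The MemoryConfig constructor name and the shard shape are assumptions for illustration; the keyword names mirror the truncated test hunks:

    import ttnn

    core_grid = ttnn.CoreCoord(8, 8)  # illustrative
    memory_config = ttnn.MemoryConfig(  # assumed constructor; the test hunks cut off above this call
        memory_layout=ttnn.TensorMemoryLayout.WIDTH_SHARDED,
        buffer_type=ttnn.BufferType.L1,
        shard_spec=ttnn.ShardSpec(
            ttnn.num_cores_to_corerange_set(28, core_grid, row_wise=True),
            (64, 96),  # illustrative shard shape (height, width)
            ttnn.ShardOrientation.ROW_MAJOR,
            False,
        ),
    )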
