Skip to content

Commit

Permalink
#12164: Update files
Browse files Browse the repository at this point in the history
  • Loading branch information
mouliraj-mcw committed Sep 12, 2024
1 parent 01d959f commit 7dee85f
Show file tree
Hide file tree
Showing 6 changed files with 193 additions and 90 deletions.
10 changes: 9 additions & 1 deletion tests/ttnn/unit_tests/operations/backward/test_backward_rsub.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,15 @@ def test_bw_rsub_opt(input_shapes, device, are_required_outputs):
cq_id = 0

pages_before = ttnn._ttnn.reports.get_buffer_pages()
ttnn.rsub_bw(grad_tensor, input_tensor, other_tensor, input_grad=input_grad, other_grad=other_grad, queue_id=cq_id)
ttnn.rsub_bw(
grad_tensor,
input_tensor,
other_tensor,
are_required_outputs=are_required_outputs,
input_grad=input_grad,
other_grad=other_grad,
queue_id=cq_id,
)
assert len(pages_before) == len(ttnn._ttnn.reports.get_buffer_pages())
tt_output_tensor_on_device = [input_grad, other_grad]

Expand Down
42 changes: 40 additions & 2 deletions tests/ttnn/unit_tests/operations/backward/test_backward_sub.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,10 @@
(torch.Size([1, 3, 320, 384])),
),
)
def test_bw_sub(input_shapes, device):
def test_bw_sub(
input_shapes,
device,
):
in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
other_data, other_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
grad_data, grad_tensor = data_gen_with_range(input_shapes, -100, 100, device)
Expand Down Expand Up @@ -78,7 +81,15 @@ def test_bw_sub_opt(input_shapes, device, are_required_outputs):
cq_id = 0

pages_before = ttnn._ttnn.reports.get_buffer_pages()
ttnn.sub_bw(grad_tensor, input_tensor, other_tensor, input_grad=input_grad, other_grad=other_grad, queue_id=cq_id)
ttnn.sub_bw(
grad_tensor,
input_tensor,
other_tensor,
are_required_outputs=are_required_outputs,
input_grad=input_grad,
other_grad=other_grad,
queue_id=cq_id,
)
assert len(pages_before) == len(ttnn._ttnn.reports.get_buffer_pages())
tt_output_tensor_on_device = [input_grad, other_grad]

Expand All @@ -90,3 +101,30 @@ def test_bw_sub_opt(input_shapes, device, are_required_outputs):
if are_required_outputs[i]:
status = status & compare_pcc([tt_output_tensor_on_device[i]], [golden_tensor[i]])
assert status


@pytest.mark.parametrize(
    "input_shapes",
    (
        (torch.Size([1, 1, 32, 32])),
        (torch.Size([1, 1, 320, 384])),
        (torch.Size([1, 3, 320, 384])),
    ),
)
@pytest.mark.parametrize("scalar", [0.05, 1.0, 0.5, 0.12, 0.0, -0.05, -1.0, -0.5, -0.12])
def test_bw_sub_scalar_opt_output(input_shapes, scalar, device):
    """Exercise the preallocated-output (optional-tensor) path of ttnn.sub_bw
    with a scalar second operand, and compare against the golden function."""
    # Generation order matters for reproducibility — keep it identical.
    in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
    grad_data, grad_tensor = data_gen_with_range(input_shapes, -5, 5, device)
    _, input_grad = data_gen_with_range(input_shapes, -1, 1, device)

    cq_id = 0
    # Snapshot buffer pages so we can check the op wrote into the
    # preallocated `input_grad` instead of allocating a new output.
    pages_before = ttnn._ttnn.reports.get_buffer_pages()
    ttnn.sub_bw(grad_tensor, input_tensor, scalar, input_grad=input_grad, queue_id=cq_id)
    assert len(pages_before) == len(ttnn._ttnn.reports.get_buffer_pages())

    golden_function = ttnn.get_golden_function(ttnn.sub_bw)
    golden_tensor = golden_function(grad_data, in_data, scalar)

    # PCC comparison of the in-place result against the golden gradient.
    assert compare_pcc([input_grad], golden_tensor)
Original file line number Diff line number Diff line change
Expand Up @@ -61,22 +61,41 @@ def test_bw_subalpha_default(input_shapes, device):
(torch.Size([1, 3, 320, 384])),
),
)
def test_bw_subalpha_opt_output(input_shapes, device):
@pytest.mark.parametrize("are_required_outputs", [[True, True], [True, False], [False, True]])
def test_bw_subalpha_opt_output(input_shapes, device, are_required_outputs):
in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
other_data, other_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
grad_data, grad_tensor = data_gen_with_range(input_shapes, -100, 100, device)

_, input_grad = data_gen_with_range(input_shapes, -1, 1, device)
input_grad = None
other_grad = None
tt_output_tensor_on_device = None

if are_required_outputs[0]:
_, input_grad = data_gen_with_range(input_shapes, -1, 1, device)
if are_required_outputs[1]:
_, other_grad = data_gen_with_range(input_shapes, -1, 1, device)

cq_id = 0
pages_before = ttnn._ttnn.reports.get_buffer_pages()
ttnn.subalpha_bw(grad_tensor, input_tensor, other_tensor, input_grad=input_grad, queue_id=cq_id)
ttnn.subalpha_bw(
grad_tensor,
input_tensor,
other_tensor,
are_required_outputs=are_required_outputs,
input_grad=input_grad,
other_grad=other_grad,
queue_id=cq_id,
)
assert len(pages_before) == len(ttnn._ttnn.reports.get_buffer_pages())

tt_output_tensor_on_device = [input_grad]
tt_output_tensor_on_device = [input_grad, other_grad]

golden_function = ttnn.get_golden_function(ttnn.subalpha_bw)
golden_tensor = golden_function(grad_data, in_data, other_data)

status = compare_pcc(tt_output_tensor_on_device, golden_tensor)
status = True
for i in range(len(are_required_outputs)):
if are_required_outputs[i]:
status = status & compare_pcc([tt_output_tensor_on_device[i]], [golden_tensor[i]])
assert status
Original file line number Diff line number Diff line change
Expand Up @@ -151,15 +151,11 @@ struct ExecuteBackwardAdd {
float scalar,
const std::optional<MemoryConfig> &memory_config = std::nullopt);

static std::vector<std::optional<Tensor>> invoke(
uint8_t queue_id,
static std::vector<Tensor> invoke(
const Tensor &grad_tensor_arg,
const Tensor &input_tensor_a_arg,
const Tensor &input_tensor_b_arg,
const std::optional<MemoryConfig> &memory_config = std::nullopt,
const std::vector<bool> &are_required_outputs = std::vector<bool>{true, true},
std::optional<Tensor> input_grad = std::nullopt,
std::optional<Tensor> other_grad = std::nullopt);
const std::optional<MemoryConfig> &memory_config = std::nullopt);

static std::vector<ComplexTensor> invoke(
const ComplexTensor &grad_tensor_arg,
Expand All @@ -171,16 +167,36 @@ struct ExecuteBackwardAdd {
};

struct ExecuteBackwardSub {
static std::vector<Tensor> invoke(
static std::vector<std::optional<Tensor>> invoke(
uint8_t queue_id,
const Tensor &grad_tensor_arg,
const Tensor &input_tensor_arg,
float scalar,
const std::optional<MemoryConfig> &memory_config = std::nullopt,
std::optional<Tensor> input_grad = std::nullopt);

static std::vector<std::optional<Tensor>> invoke(
uint8_t queue_id,
const Tensor &grad_tensor_arg,
const Tensor &input_tensor_a_arg,
const Tensor &input_tensor_b_arg,
const std::vector<bool> &are_required_outputs = std::vector<bool>{true, true},
const std::optional<MemoryConfig> &memory_config = std::nullopt,
std::optional<Tensor> input_grad = std::nullopt,
std::optional<Tensor> other_grad = std::nullopt);

static std::vector<std::optional<Tensor>> invoke(
const Tensor &grad_tensor_arg,
const Tensor &input_tensor_arg,
float scalar,
const std::optional<MemoryConfig> &memory_config = std::nullopt);

static std::vector<Tensor> invoke(

static std::vector<std::optional<Tensor>> invoke(
const Tensor &grad_tensor_arg,
const Tensor &input_tensor_a_arg,
const Tensor &input_tensor_b_arg,
const std::vector<bool> &are_required_outputs = std::vector<bool>{true, true},
const std::optional<MemoryConfig> &memory_config = std::nullopt);

static std::vector<ComplexTensor> invoke(
Expand Down Expand Up @@ -275,8 +291,8 @@ struct ExecuteBackwardSubAlpha {
const Tensor &input_tensor_a_arg,
const Tensor &input_tensor_b_arg,
float alpha,
const std::optional<MemoryConfig> &memory_config = std::nullopt,
const std::vector<bool> &are_required_outputs = std::vector<bool>{true, true},
const std::optional<MemoryConfig> &memory_config = std::nullopt,
std::optional<Tensor> input_grad = std::nullopt,
std::optional<Tensor> other_grad = std::nullopt);

Expand All @@ -285,6 +301,7 @@ struct ExecuteBackwardSubAlpha {
const Tensor &input_tensor_a_arg,
const Tensor &input_tensor_b_arg,
float alpha,
const std::vector<bool> &are_required_outputs = std::vector<bool>{true, true},
const std::optional<MemoryConfig> &memory_config = std::nullopt);

};
Expand All @@ -295,15 +312,16 @@ struct ExecuteBackwardRsub {
const Tensor &grad_tensor_arg,
const Tensor &input_tensor_a_arg,
const Tensor &input_tensor_b_arg,
const std::optional<MemoryConfig> &memory_config = std::nullopt,
const std::vector<bool> &are_required_outputs = std::vector<bool>{true, true},
const std::optional<MemoryConfig> &memory_config = std::nullopt,
std::optional<Tensor> input_grad = std::nullopt,
std::optional<Tensor> other_grad = std::nullopt);

static std::vector<std::optional<ttnn::Tensor>> invoke(
const Tensor &grad_tensor_arg,
const Tensor &input_tensor_a_arg,
const Tensor &input_tensor_b_arg,
const std::vector<bool> &are_required_outputs = std::vector<bool>{true, true},
const std::optional<MemoryConfig> &memory_config = std::nullopt);

};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
#include "ttnn/operations/eltwise/ternary_backward/ternary_backward.hpp"
#include "ttnn/types.hpp"
#include "ttnn/cpp/ttnn/common/constants.hpp"

namespace py = pybind11;

namespace ttnn {
Expand Down Expand Up @@ -353,7 +354,7 @@ void bind_binary_backward_float_default(py::module& module, const binary_backwar
template <typename binary_backward_operation_t>
void bind_binary_backward_sub_alpha(py::module& module, const binary_backward_operation_t& operation, const std::string& parameter_name, const std::string& parameter_doc, float parameter_value, std::string_view description, std::string_view supported_dtype) {
auto doc = fmt::format(
R"doc({0}(grad_tensor: ttnn.Tensor, input_tensor_a: ttnn.Tensor, input_tensor_b: ttnn.Tensor, {2}: float, *, memory_config: ttnn.MemoryConfig) -> std::vector<Tensor>
R"doc({0}(grad_tensor: ttnn.Tensor, input_tensor_a: ttnn.Tensor, input_tensor_b: ttnn.Tensor, {2}: float, *, are_required_outputs: Optional[List[bool]] = [True, True], memory_config: ttnn.MemoryConfig) -> std::vector<Tensor>
{5}
Expand All @@ -364,6 +365,7 @@ void bind_binary_backward_sub_alpha(py::module& module, const binary_backward_op
* :attr:`{2}` (float):`{3}`,Default value = {4}
Keyword args:
* :attr:`are_required_outputs` (Optional[bool]): required output gradients
* :attr:`memory_config` (Optional[ttnn.MemoryConfig]): memory config for the output tensor
* :attr:`output_tensor` (Optional[ttnn.Tensor]): preallocated output tensor
* :attr:`queue_id` (Optional[uint8]): command queue id
Expand Down Expand Up @@ -400,31 +402,31 @@ void bind_binary_backward_sub_alpha(py::module& module, const binary_backward_op
const ttnn::Tensor& input_tensor,
const ttnn::Tensor& other_tensor,
float alpha,
const std::optional<ttnn::MemoryConfig>& memory_config,
const std::vector<bool>& are_required_outputs,
const std::optional<ttnn::MemoryConfig>& memory_config,
const std::optional<ttnn::Tensor>& input_grad,
const std::optional<ttnn::Tensor>& other_grad,
const uint8_t& queue_id) -> std::vector<std::optional<ttnn::Tensor>> {
return self(queue_id, grad_tensor, input_tensor, other_tensor, alpha, memory_config, are_required_outputs, input_grad, other_grad);
return self(queue_id, grad_tensor, input_tensor, other_tensor, alpha, are_required_outputs, memory_config, input_grad, other_grad);
},
py::arg("grad_tensor"),
py::arg("input_tensor_a"),
py::arg("input_tensor_b"),
py::arg(parameter_name.c_str()) = parameter_value,
py::kw_only(),
py::arg("memory_config") = std::nullopt,
py::arg("are_required_outputs") = std::vector<bool>{true, true},
py::arg("memory_config") = std::nullopt,
py::arg("input_grad") = std::nullopt,
py::arg("other_grad") = std::nullopt,
py::arg("queue_id") = 0}
py::arg("queue_id") = ttnn::DefaultQueueId}
);
}

template <typename binary_backward_operation_t>
void bind_binary_backward_rsub(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) {

auto doc = fmt::format(
R"doc({0}(grad_tensor: ttnn.Tensor, input_tensor_a: ttnn.Tensor, input_tensor_b: ttnn.Tensor, *, memory_config: ttnn.MemoryConfig) -> std::vector<Tensor>
R"doc({0}(grad_tensor: ttnn.Tensor, input_tensor_a: ttnn.Tensor, input_tensor_b: ttnn.Tensor, *, are_required_outputs: Optional[List[bool]] = [True, True], memory_config: ttnn.MemoryConfig) -> std::vector<Tensor>
{2}
Expand All @@ -434,6 +436,7 @@ void bind_binary_backward_rsub(py::module& module, const binary_backward_operati
* :attr:`input_tensor_b`
Keyword args:
* :attr:`are_required_outputs` (Optional[bool]): required output gradients
* :attr:`memory_config` (Optional[ttnn.MemoryConfig]): memory config for the output tensor
* :attr:`output_tensor` (Optional[ttnn.Tensor]): preallocated output tensor
* :attr:`queue_id` (Optional[uint8]): command queue id
Expand Down Expand Up @@ -466,22 +469,22 @@ void bind_binary_backward_rsub(py::module& module, const binary_backward_operati
const ttnn::Tensor& grad_tensor,
const ttnn::Tensor& input_tensor,
const ttnn::Tensor& other_tensor,
const std::optional<ttnn::MemoryConfig>& memory_config,
const std::vector<bool>& are_required_outputs,
const std::optional<ttnn::MemoryConfig>& memory_config,
const std::optional<ttnn::Tensor>& input_grad,
const std::optional<ttnn::Tensor>& other_grad,
const uint8_t& queue_id) -> std::vector<std::optional<ttnn::Tensor>> {
return self(queue_id, grad_tensor, input_tensor, other_tensor, memory_config, are_required_outputs, input_grad, other_grad);
return self(queue_id, grad_tensor, input_tensor, other_tensor, are_required_outputs, memory_config, input_grad, other_grad);
},
py::arg("grad_tensor"),
py::arg("input_tensor_a"),
py::arg("input_tensor_b"),
py::kw_only(),
py::arg("memory_config") = std::nullopt,
py::arg("are_required_outputs") = std::vector<bool>{true, true},
py::arg("memory_config") = std::nullopt,
py::arg("input_grad") = std::nullopt,
py::arg("other_grad") = std::nullopt,
py::arg("queue_id") = 0}
py::arg("queue_id") = ttnn::DefaultQueueId}
);
}

Expand Down Expand Up @@ -587,7 +590,7 @@ void bind_binary_bw_mul(py::module& module, const binary_backward_operation_t& o
template <typename binary_backward_operation_t>
void bind_binary_bw_sub(py::module& module, const binary_backward_operation_t& operation, std::string_view description, std::string_view supported_dtype) {
auto doc = fmt::format(
R"doc({0}(input_tensor_a: Union[ttnn.Tensor, ComplexTensor] , input_tensor_b: Union[ComplexTensor, ttnn.Tensor, int, float], *, memory_config: Optional[ttnn.MemoryConfig] = None, dtype: Optional[ttnn.DataType] = None, activations: Optional[List[str]] = None) -> ttnn.Tensor or ComplexTensor
R"doc({0}(input_tensor_a: Union[ttnn.Tensor, ComplexTensor] , input_tensor_b: Union[ComplexTensor, ttnn.Tensor, int, float], *, are_required_outputs: Optional[List[bool]] = [True, True], memory_config: Optional[ttnn.MemoryConfig] = None, dtype: Optional[ttnn.DataType] = None, activations: Optional[List[str]] = None) -> ttnn.Tensor or ComplexTensor
{2}
Supports broadcasting.
Expand All @@ -597,10 +600,9 @@ void bind_binary_bw_sub(py::module& module, const binary_backward_operation_t& o
* :attr:`input_tensor_b` (ComplexTensor or ttnn.Tensor or Number): the tensor or number to add to :attr:`input_tensor_a`.
Keyword args:
* :attr:`are_required_outputs` (Optional[bool]): required output gradients
* :attr:`memory_config` (Optional[ttnn.MemoryConfig]): memory config for the output tensor
* :attr:`dtype` (Optional[ttnn.DataType]): data type for the output tensor
* :attr:`output_tensor` (Optional[ttnn.Tensor]): preallocated output tensor
* :attr:`activations` (Optional[List[str]]): list of activation functions to apply to the output tensor
* :attr:`queue_id` (Optional[uint8]): command queue id
Supported dtypes, layouts, and ranks:
Expand Down Expand Up @@ -631,37 +633,41 @@ void bind_binary_bw_sub(py::module& module, const binary_backward_operation_t& o
const Tensor& grad_tensor,
const Tensor& input_tensor_a,
const float scalar,
const std::optional<MemoryConfig>& memory_config){
return self(grad_tensor, input_tensor_a, scalar, memory_config);
},
const std::optional<ttnn::MemoryConfig>& memory_config,
const std::optional<ttnn::Tensor>& input_grad,
const uint8_t& queue_id) -> std::vector<std::optional<ttnn::Tensor>> {
return self(queue_id, grad_tensor, input_tensor_a, scalar, memory_config, input_grad);
},
py::arg("grad_tensor"),
py::arg("input_tensor_a"),
py::arg("scalar"),
py::kw_only(),
py::arg("memory_config") = std::nullopt},
py::arg("memory_config") = std::nullopt,
py::arg("input_grad") = std::nullopt,
py::arg("queue_id") = ttnn::DefaultQueueId},

// tensor and tensor
ttnn::pybind_overload_t{
[](const binary_backward_operation_t& self,
const ttnn::Tensor& grad_tensor,
const ttnn::Tensor& input_tensor,
const ttnn::Tensor& other_tensor,
const std::optional<ttnn::MemoryConfig>& memory_config,
const std::vector<bool>& are_required_outputs,
const std::optional<ttnn::MemoryConfig>& memory_config,
const std::optional<ttnn::Tensor>& input_grad,
const std::optional<ttnn::Tensor>& other_grad,
const uint8_t& queue_id) -> std::vector<std::optional<ttnn::Tensor>> {
return self(queue_id, grad_tensor, input_tensor, other_tensor, memory_config, are_required_outputs, input_grad, other_grad);
return self(queue_id, grad_tensor, input_tensor, other_tensor, are_required_outputs, memory_config, input_grad, other_grad);
},
py::arg("grad_tensor"),
py::arg("input_tensor"),
py::arg("other_tensor"),
py::kw_only(),
py::arg("memory_config") = std::nullopt,
py::arg("are_required_outputs") = std::vector<bool>{true, true},
py::arg("memory_config") = std::nullopt,
py::arg("input_grad") = std::nullopt,
py::arg("other_grad") = std::nullopt,
py::arg("queue_id") = 0},
py::arg("queue_id") = ttnn::DefaultQueueId},

// complex tensor
ttnn::pybind_overload_t{
Expand Down
Loading

0 comments on commit 7dee85f

Please sign in to comment.