Call sparse op from python (#40608)
* call sparse api from python
zhangkaihuo authored Mar 19, 2022
1 parent a8e5c9b commit 95fbbc5
Showing 13 changed files with 298 additions and 70 deletions.
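Before the per-file diffs, a minimal sketch of the Python-side usage this commit enables. The conversion entry point to_sparse_coo is an assumption for illustration; the accessor names match the bindings added in paddle/fluid/pybind/eager_method.cc below.

    import paddle

    dense = paddle.to_tensor([[0., 1.], [2., 0.]])
    coo = dense.to_sparse_coo(sparse_dim=2)  # assumed conversion entry point

    print(coo.is_sparse())          # True for SparseCoo or SparseCsr tensors
    print(coo.is_sparse_coo())      # True
    print(coo.non_zero_indices())   # indices of the non-zero entries
    print(coo.non_zero_elements())  # values of the non-zero entries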
@@ -730,7 +730,7 @@ def GenerateNodeCreationCodes(
else:
# Tuple api_result
if IsPlainTensorType(rtype):
output_autograd_meta = f" egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&api_result[{pos}]);"
output_autograd_meta = f" egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&std::get<{pos}>(api_result));"
else:
assert IsVectorTensorType(rtype)
output_autograd_meta = f" std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&api_result[{pos}]);\n"
@@ -767,8 +767,11 @@ def GenerateNodeCreationCodes(
else:
set_tensor_wrappers = f" grad_node->SetTensorWrapper{name}({name}, true);"
else:
- if IsVectorTensorType(atype):
-     tw_name = f"api_result[{pos}]"
+ if num_fwd_outputs > 1:
+     # Aligned with forward output position
+     assert name in forward_outputs_position_map.keys()
+     fwd_output_pos = forward_outputs_position_map[name][1]
+     tw_name = f"std::get<{fwd_output_pos}>(api_result)"
else:
tw_name = f"api_result"

@@ -805,8 +808,8 @@ def GenerateNodeCreationCodes(
set_retain_grad = f" egr::EagerUtils::CheckAndRetainGrad(api_result);"
set_grad_in_meta = f" grad_node->SetGradInMeta(api_result, {pos});"
else:
set_retain_grad = f" egr::EagerUtils::CheckAndRetainGrad(api_result[{pos}]);"
set_grad_in_meta = f" grad_node->SetGradInMeta(api_result[{pos}], {pos});"
set_retain_grad = f" egr::EagerUtils::CheckAndRetainGrad(std::get<{pos}>(api_result));"
set_grad_in_meta = f" grad_node->SetGradInMeta(std::get<{pos}>(api_result), {pos});"

set_out_rank_list.append(set_out_rank)
set_history_list.append(set_history)
@@ -934,7 +937,7 @@ def GenerateForwardDefinition(fwd_api_name, bwd_api_name,
returns_list[0] = f"api_result"
else:
# Tuple api_result
- returns_list[pos] = f"api_result[{pos}]"
+ returns_list[pos] = f"std::get<{pos}>(api_result)"

if IsPlainTensorType(rtype):
returns_type_list[pos] = "paddle::experimental::Tensor"
@@ -1084,7 +1087,7 @@ def GenerateNodeCCFile(filepath, node_definition_str):
#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
#include "paddle/fluid/eager/to_static/run_program_op_node.h"
#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/backward/sparse_bw_api.h"
"""
file_contents += node_definition_str
with open(filepath, 'a') as f:
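The repeated change in this file swaps api_result[{pos}] for std::get<{pos}>(api_result), consistent with the generated forward call now returning a std::tuple for multi-output APIs; operator[] does not exist on std::tuple, so access must be by compile-time index. A toy reconstruction of the new tensor-wrapper selection above (map contents invented for illustration):

    # Toy sketch of the generator's position-map logic; entries are invented.
    forward_outputs_position_map = {"out": ("Tensor", 0), "rulebook": ("Tensor", 1)}
    num_fwd_outputs = len(forward_outputs_position_map)
    name = "rulebook"
    if num_fwd_outputs > 1:
        assert name in forward_outputs_position_map.keys()
        fwd_output_pos = forward_outputs_position_map[name][1]
        tw_name = f"std::get<{fwd_output_pos}>(api_result)"
    else:
        tw_name = "api_result"
    print(tw_name)  # -> std::get<1>(api_result)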
@@ -337,7 +337,7 @@ def GeneratePythonCFunction(self):
"paddle::experimental::", namespace, forward_api_name)
else:
fwd_function_name = FUNCTION_NAME_TEMPLATE.format(
"", namespace, GetForwardFunctionName(forward_api_name))
"::", namespace, GetForwardFunctionName(forward_api_name))

# Generate Record Event for performance profiling
pythonc_record_event_str = RECORD_EVENT_TEMPLATE.format(
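The one-character change above prefixes the generated call with the global-namespace qualifier. A sketch under the assumption that FUNCTION_NAME_TEMPLATE simply concatenates its three arguments (the function name below is invented):

    FUNCTION_NAME_TEMPLATE = "{}{}{}"  # assumed shape

    fwd_function_name = FUNCTION_NAME_TEMPLATE.format(
        "::", "sparse::", "to_dense_dygraph_function")  # illustrative name
    print(fwd_function_name)  # -> ::sparse::to_dense_dygraph_function
    # The leading "::" makes the generated C++ call resolve from the global
    # namespace, so it cannot be captured by a nested namespace at the call site.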
paddle/fluid/pybind/eager_method.cc (114 additions, 0 deletions)
@@ -36,6 +36,8 @@ limitations under the License. */
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

namespace paddle {
namespace pybind {
@@ -718,6 +720,98 @@ static PyObject* set_grad_type(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_get_non_zero_indices(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE(self->tensor.is_sparse_coo_tensor(),
paddle::platform::errors::Fatal(
"this method is only effective for SparseCooTensor"));
auto sparse_coo_tensor =
std::dynamic_pointer_cast<phi::SparseCooTensor>(self->tensor.impl());
paddle::experimental::Tensor tensor(std::make_shared<phi::DenseTensor>(
sparse_coo_tensor->non_zero_indices()));
return ToPyObject(tensor);
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_get_non_zero_elements(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE(
self->tensor.is_sparse_coo_tensor() ||
self->tensor.is_sparse_csr_tensor(),
paddle::platform::errors::Fatal("this method is only effective for "
"SparseCooTensor or SparseCsrTensor"));
if (self->tensor.is_sparse_coo_tensor()) {
auto sparse_coo_tensor =
std::dynamic_pointer_cast<phi::SparseCooTensor>(self->tensor.impl());
paddle::experimental::Tensor tensor(std::make_shared<phi::DenseTensor>(
sparse_coo_tensor->non_zero_elements()));
return ToPyObject(tensor);
} else {
auto sparse_csr_tensor =
std::dynamic_pointer_cast<phi::SparseCsrTensor>(self->tensor.impl());
paddle::experimental::Tensor tensor(std::make_shared<phi::DenseTensor>(
sparse_csr_tensor->non_zero_elements()));
return ToPyObject(tensor);
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_get_non_zero_crows(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE(self->tensor.is_sparse_csr_tensor(),
paddle::platform::errors::Fatal(
"this method is only effective for SparseCsrTensor"));
auto sparse_csr_tensor =
std::dynamic_pointer_cast<phi::SparseCsrTensor>(self->tensor.impl());
paddle::experimental::Tensor tensor(
std::make_shared<phi::DenseTensor>(sparse_csr_tensor->non_zero_crows()));
return ToPyObject(tensor);
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_get_non_zero_cols(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE(self->tensor.is_sparse_csr_tensor(),
paddle::platform::errors::Fatal(
"this method is only effective for SparseCsrTensor"));
auto sparse_csr_tensor =
std::dynamic_pointer_cast<phi::SparseCsrTensor>(self->tensor.impl());
paddle::experimental::Tensor tensor(
std::make_shared<phi::DenseTensor>(sparse_csr_tensor->non_zero_cols()));
return ToPyObject(tensor);
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_is_sparse(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
return ToPyObject(self->tensor.is_sparse_coo_tensor() ||
self->tensor.is_sparse_csr_tensor());
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_is_sparse_coo(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
return ToPyObject(self->tensor.is_sparse_coo_tensor());
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_is_sparse_csr(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
return ToPyObject(self->tensor.is_sparse_csr_tensor());
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor__inplace_version(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
@@ -775,6 +869,26 @@ PyMethodDef variable_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"_set_grad_type", (PyCFunction)(void (*)(void))set_grad_type,
METH_VARARGS | METH_KEYWORDS, NULL},
/***the method of sparse tensor****/
{"non_zero_indices",
(PyCFunction)(void (*)(void))tensor_method_get_non_zero_indices,
METH_VARARGS | METH_KEYWORDS, NULL},
{"non_zero_elements",
(PyCFunction)(void (*)(void))tensor_method_get_non_zero_elements,
METH_VARARGS | METH_KEYWORDS, NULL},
{"non_zero_crows",
(PyCFunction)(void (*)(void))tensor_method_get_non_zero_crows,
METH_VARARGS | METH_KEYWORDS, NULL},
{"non_zero_cols",
(PyCFunction)(void (*)(void))tensor_method_get_non_zero_cols,
METH_VARARGS | METH_KEYWORDS, NULL},
{"is_sparse", (PyCFunction)(void (*)(void))tensor_method_is_sparse,
METH_VARARGS | METH_KEYWORDS, NULL},
{"is_sparse_coo", (PyCFunction)(void (*)(void))tensor_method_is_sparse_coo,
METH_VARARGS | METH_KEYWORDS, NULL},
{"is_sparse_csr", (PyCFunction)(void (*)(void))tensor_method_is_sparse_csr,
METH_VARARGS | METH_KEYWORDS, NULL},
/***the method of sparse tensor****/
{"_inplace_version", (PyCFunction)(void (*)(void))tensor__inplace_version,
METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}};
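A sketch of how the CSR-side bindings registered above surface in Python; to_sparse_csr is an assumed conversion entry point, while the accessor names are taken from the method table.

    import paddle

    dense = paddle.to_tensor([[0., 1.], [2., 0.]])
    csr = dense.to_sparse_csr()     # assumed conversion entry point

    print(csr.is_sparse_csr())      # True
    print(csr.non_zero_crows())     # compressed row offsets (CSR)
    print(csr.non_zero_cols())      # column indices of the non-zero entries
    print(csr.non_zero_elements())  # values, shared accessor with the COO path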
paddle/phi/api/include/tensor.h (16 additions, 0 deletions)
@@ -225,6 +225,22 @@ class PADDLE_API Tensor final {
*/
bool is_selected_rows() const;

/**
* @brief Determine whether tensor is SparseCooTensor
*
* @return true
* @return false
*/
bool is_sparse_coo_tensor() const;

/**
* @brief Determine whether tensor is SparseCsrTensor
*
* @return true
* @return false
*/
bool is_sparse_csr_tensor() const;

/* Part 3: Device and Backend methods */

/**
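These two predicates back the PADDLE_ENFORCE guards in eager_method.cc above, so calling a CSR-only accessor on a COO tensor raises. Illustrative Python-level behavior (conversion entry point assumed):

    import paddle

    dense = paddle.to_tensor([[0., 1.], [2., 0.]])
    coo = dense.to_sparse_coo(sparse_dim=2)  # assumed conversion entry point

    coo.non_zero_indices()   # fine: COO accessor on a COO tensor
    try:
        coo.non_zero_crows()  # CSR-only accessor on a COO tensor
    except Exception:
        print("raises: this method is only effective for SparseCsrTensor")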
paddle/phi/api/lib/sparse_api_custom_impl.cc (28 additions, 28 deletions)
@@ -25,25 +25,24 @@ namespace paddle {
namespace experimental {
namespace sparse {

Tensor to_sparse_coo_impl(const Tensor& x,
Backend backend,
const int64_t sparse_dim) {
Tensor to_sparse_coo_impl(const Tensor& x, const int64_t sparse_dim) {
if (x.layout() == phi::DataLayout::SPARSE_COO) {
return x;
}

// 1. Get kernel signature and kernel
- auto kernel_key_set = ParseKernelKeyByInputArgs(x);
- kernel_key_set.backend_set = kernel_key_set.backend_set | BackendSet(backend);
- auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
std::string kernel_name = "dense_to_sparse_coo";
if (x.layout() == phi::DataLayout::SPARSE_CSR) {
kernel_name = "sparse_csr_to_coo";
}

+ auto kernel_key_set = ParseKernelKeyByInputArgs(x);
+ auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();

auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
kernel_name, kernel_key);

VLOG(6) << "to API kernel key: " << kernel_key;
VLOG(6) << "add API kernel key: " << kernel_key;
VLOG(6) << "to API kernel: " << kernel;

// 2. Get Device Context
@@ -62,18 +61,18 @@ Tensor to_sparse_coo_impl(const Tensor& x,

// 4. InferMeta
auto indices_meta =
- phi::DenseTensorMeta(phi::DataType::INT64, {-1}, phi::DataLayout::NCHW);
- auto elements_meta = phi::DenseTensorMeta(x.dtype(), {-1}, x.layout());
+ phi::DenseTensorMeta(phi::DataType::INT64, {1}, phi::DataLayout::NCHW);
+ auto elements_meta = phi::DenseTensorMeta(x.dtype(), {1}, x.layout());

// 5. Prepare outputs
// create empty SparseCooTensor
phi::DenseTensor non_zero_indices(
phi::make_intrusive<paddle::experimental::SharedStorage>(
- phi::TransToPhiPlace(backend)),
+ phi::TransToPhiPlace(kernel_key.backend())),
std::move(indices_meta));
phi::DenseTensor non_zero_elements(
phi::make_intrusive<paddle::experimental::SharedStorage>(
- phi::TransToPhiPlace(backend)),
+ phi::TransToPhiPlace(kernel_key.backend())),
std::move(elements_meta));
auto coo = std::make_shared<phi::SparseCooTensor>(
non_zero_indices, non_zero_elements, x.dims());
@@ -88,23 +87,23 @@ Tensor to_sparse_coo_impl(const Tensor& x,
return out;
}

Tensor to_sparse_csr_impl(const Tensor& x, Backend backend) {
Tensor to_sparse_csr_impl(const Tensor& x) {
if (x.layout() == phi::DataLayout::SPARSE_CSR) {
return x;
}
// 1. Get kernel signature and kernel
- auto kernel_key_set = ParseKernelKeyByInputArgs(x);
- kernel_key_set.backend_set = kernel_key_set.backend_set | BackendSet(backend);
- auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
std::string kernel_name = "dense_to_sparse_csr";
if (x.layout() == phi::DataLayout::SPARSE_COO) {
kernel_name = "sparse_coo_to_csr";
}

+ auto kernel_key_set = ParseKernelKeyByInputArgs(x);
+ auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();

auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
kernel_name, kernel_key);

VLOG(6) << "to API kernel key: " << kernel_key;
VLOG(6) << "add API kernel key: " << kernel_key;
VLOG(6) << "to API kernel: " << kernel;

// 2. Get Device Context
@@ -122,24 +121,24 @@ Tensor to_sparse_csr_impl(const Tensor& x, Backend backend) {

// 4. InferMeta
auto crows_meta =
- phi::DenseTensorMeta(phi::DataType::INT64, {-1}, phi::DataLayout::NCHW);
+ phi::DenseTensorMeta(phi::DataType::INT64, {1}, phi::DataLayout::NCHW);
auto cols_meta =
- phi::DenseTensorMeta(phi::DataType::INT64, {-1}, phi::DataLayout::NCHW);
- auto elements_meta = phi::DenseTensorMeta(x.dtype(), {-1}, x.layout());
+ phi::DenseTensorMeta(phi::DataType::INT64, {1}, phi::DataLayout::NCHW);
+ auto elements_meta = phi::DenseTensorMeta(x.dtype(), {1}, x.layout());

// 5. Prepare outputs
// create empty SparseCooTensor
phi::DenseTensor non_zero_crows(
phi::make_intrusive<paddle::experimental::SharedStorage>(
- phi::TransToPhiPlace(backend)),
+ phi::TransToPhiPlace(kernel_key.backend())),
std::move(crows_meta));
phi::DenseTensor non_zero_cols(
phi::make_intrusive<paddle::experimental::SharedStorage>(
- phi::TransToPhiPlace(backend)),
+ phi::TransToPhiPlace(kernel_key.backend())),
std::move(cols_meta));
phi::DenseTensor non_zero_elements(
phi::make_intrusive<paddle::experimental::SharedStorage>(
- phi::TransToPhiPlace(backend)),
+ phi::TransToPhiPlace(kernel_key.backend())),
std::move(elements_meta));
auto csr = std::make_shared<phi::SparseCsrTensor>(
non_zero_crows, non_zero_cols, non_zero_elements, x.dims());
@@ -154,24 +153,25 @@ Tensor to_sparse_csr_impl(const Tensor& x, Backend backend) {
return out;
}

Tensor to_dense_impl(const Tensor& x, Backend backend) {
Tensor to_dense_impl(const Tensor& x) {
if (x.layout() != phi::DataLayout::SPARSE_CSR &&
x.layout() != phi::DataLayout::SPARSE_COO) {
return x;
}

// 1. Get kernel signature and kernel
- auto kernel_key_set = ParseKernelKeyByInputArgs(x);
- kernel_key_set.backend_set = kernel_key_set.backend_set | BackendSet(backend);
- auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
std::string kernel_name = "sparse_coo_to_dense";
if (x.layout() == phi::DataLayout::SPARSE_CSR) {
kernel_name = "sparse_csr_to_dense";
}

+ auto kernel_key_set = ParseKernelKeyByInputArgs(x);
+ auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();

auto kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
kernel_name, kernel_key);

VLOG(6) << "to API kernel key: " << kernel_key;
VLOG(6) << "add API kernel key: " << kernel_key;
VLOG(6) << "to API kernel: " << kernel;

// 2. Get Device Context
@@ -194,7 +194,7 @@ Tensor to_dense_impl(const Tensor& x, Backend backend) {
// create empty SparseCooTensor
auto dense_out = std::make_shared<phi::DenseTensor>(
phi::make_intrusive<paddle::experimental::SharedStorage>(
- phi::TransToPhiPlace(backend)),
+ phi::TransToPhiPlace(kernel_key.backend())),
std::move(dense_meta));

kernel_context.EmplaceBackOutput(dense_out.get());
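The pattern across sparse_api_custom_impl.cc is one refactor: the explicit Backend parameter is gone, and the output place now comes from kernel_key.backend(), which ParseKernelKeyByInputArgs derives from the inputs. A sketch of the caller-visible effect (device move and conversion entry points assumed):

    import paddle

    dense = paddle.to_tensor([[0., 1.], [2., 0.]])
    if paddle.is_compiled_with_cuda():
        dense = dense.cuda()  # assumed device move

    # No Backend argument: the sparse result is allocated on dense's place,
    # inferred from the input rather than supplied by the caller.
    coo = dense.to_sparse_coo(sparse_dim=2)  # assumed conversion entry point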
paddle/phi/api/lib/sparse_api_custom_impl.h (3 additions, 5 deletions)
@@ -21,13 +21,11 @@ namespace paddle {
namespace experimental {
namespace sparse {

Tensor to_dense_impl(const Tensor& x, Backend backend);
Tensor to_dense_impl(const Tensor& x);

Tensor to_sparse_coo_impl(const Tensor& x,
Backend backend,
const int64_t sparse_dim);
Tensor to_sparse_coo_impl(const Tensor& x, const int64_t sparse_dim);

Tensor to_sparse_csr_impl(const Tensor& x, Backend backend);
Tensor to_sparse_csr_impl(const Tensor& x);

} // namespace sparse
} // namespace experimental
(The remaining changed files in this commit are not shown here.)