[code-gen] Support code-gen for opmaker of sparse op #46993

Merged
14 commits merged on Oct 18, 2022
Changes from all commits
2 changes: 2 additions & 0 deletions .gitignore
@@ -72,7 +72,9 @@ tools/nvcc_lazy

# these files (directories) are generated before build system generation
paddle/fluid/operators/generated_op.cc
paddle/fluid/operators/generated_sparse_op.cc
paddle/phi/ops/compat/generated_sig.cc
paddle/phi/ops/compat/generated_sparse_sig.cc
paddle/phi/api/yaml/parsed_apis/
python/paddle/utils/code_gen/
paddle/fluid/pybind/tmp_eager_op_function_impl.h
10 changes: 9 additions & 1 deletion paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -55,7 +55,9 @@ static std::unordered_set<std::string> black_ops_list = {"run_program",
"fused_gate_attention",
"fused_feedforward",
"fused_attention",
"fused_gemm_epilogue"};
"fused_gemm_epilogue",
"sparse_divide_scalar",
"sparse_scale"};

static std::string LegalizeVariableName(const std::string& var_name) {
std::string ret = var_name;
@@ -3161,6 +3163,12 @@ static void DygraphCodeGeneration(const std::string& output_dir,
continue;
}

// Skip the sparse op
if (op_type.compare(0, 7, "sparse_") == 0 && op_type != "sparse_momentum" &&
op_type != "sparse_attention") {
continue;
}

GradNodeGenerationInfo bwd_info;

bool is_available = CollectGradInformationFromOpInfo(op_info, &bwd_info);
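The filter added above skips every operator whose registered name begins with "sparse_" (those are now produced by the sparse op code generator), while still generating the two dense ops that merely share the prefix. A minimal, self-contained sketch of the same predicate; the function name and the op names in main() are illustrative only:

#include <iostream>
#include <string>

// Standalone sketch of the skip predicate used above: operator names that
// start with "sparse_" are handled by the sparse code generator, except the
// two dense ops that merely share the prefix.
static bool ShouldSkipSparseOp(const std::string& op_type) {
  return op_type.compare(0, 7, "sparse_") == 0 &&
         op_type != "sparse_momentum" && op_type != "sparse_attention";
}

int main() {
  const char* examples[] = {"sparse_scale", "sparse_divide_scalar",
                            "sparse_momentum", "relu"};
  for (const char* op : examples) {
    // Prints: skip, skip, generate, generate.
    std::cout << op << ": " << (ShouldSkipSparseOp(op) ? "skip" : "generate")
              << "\n";
  }
  return 0;
}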
2 changes: 1 addition & 1 deletion paddle/fluid/framework/CMakeLists.txt
@@ -237,7 +237,7 @@ cc_test(
cc_library(
var_type_traits
SRCS var_type_traits.cc
DEPS framework_proto scope tensor_array sparse_coo_tensor)
DEPS framework_proto scope tensor_array sparse_coo_tensor sparse_csr_tensor)
if(WITH_GPU)
target_link_libraries(var_type_traits dynload_cuda)
endif()
3 changes: 3 additions & 0 deletions paddle/fluid/framework/framework.proto
@@ -156,6 +156,8 @@ message VarType {
PSTRING = 29;
// the data type of phi::SparseCooTensor
SPARSE_COO = 30;
// the data type of phi::SparseCsrTensor
SPARSE_CSR = 31;
}

required Type type = 1;
@@ -189,6 +191,7 @@ message VarType {
optional TensorDesc strings = 9;
optional TensorDesc vocab = 10;
optional TensorDesc sparse_coo = 11;
optional TensorDesc sparse_csr = 12;
}

message VarDesc {
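A hedged sketch of how the two proto additions above might be consumed from C++. It assumes only the protobuf-generated header for framework.proto and the standard proto2 accessors; the helper name is illustrative (the checks actually added by this PR appear in the infershape and operator changes below):

#include "paddle/fluid/framework/framework.pb.h"

// Illustrative helper, not part of this PR: a VarType message describes a CSR
// sparse tensor when its type enum is the new SPARSE_CSR value and the new
// optional sparse_csr TensorDesc field is set.
bool DescribesSparseCsrTensor(
    const paddle::framework::proto::VarType& var_type) {
  return var_type.type() == paddle::framework::proto::VarType::SPARSE_CSR &&
         var_type.has_sparse_csr();
}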
5 changes: 5 additions & 0 deletions paddle/fluid/framework/infershape_utils.cc
@@ -117,6 +117,11 @@ class InferShapeArgumentMappingContext : public phi::ArgumentMappingContext {
return var_type == proto::VarType::SPARSE_COO;
}

bool IsSparseCsrTensorInput(const std::string& name) const override {
auto var_type = ctx_.GetInputVarType(name);
return var_type == proto::VarType::SPARSE_CSR;
}

bool IsDenseTensorOutput(const std::string& name) const override {
auto var_types = ctx_.GetOutputsVarType(name);
return std::all_of(var_types.begin(),
5 changes: 5 additions & 0 deletions paddle/fluid/framework/operator.h
@@ -537,6 +537,11 @@ class ExecutionArgumentMappingContext : public phi::ArgumentMappingContext {
return var->IsType<phi::SparseCooTensor>();
}

bool IsSparseCsrTensorInput(const std::string& name) const override {
const auto* var = ctx_.InputVar(name);
return var->IsType<phi::SparseCsrTensor>();
}

bool IsDenseTensorOutput(const std::string& name) const override {
auto vars = ctx_.MultiOutputVar(name);
return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
1 change: 1 addition & 0 deletions paddle/fluid/framework/tensor.h
@@ -18,6 +18,7 @@ limitations under the License. */
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

namespace paddle {
namespace framework {
2 changes: 2 additions & 0 deletions paddle/fluid/framework/var_type_traits.h
@@ -55,6 +55,7 @@ namespace phi {
class DenseTensor;
class SelectedRows;
class SparseCooTensor;
class SparseCsrTensor;
} // namespace phi

// Users should add forward declarations here
@@ -182,6 +183,7 @@ using VarTypeRegistry = detail::VarTypeRegistryImpl<
phi::DenseTensor,
phi::SelectedRows,
phi::SparseCooTensor,
phi::SparseCsrTensor,
std::vector<Scope *>,
LoDRankTable,
Strings,
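Registering phi::SparseCsrTensor in VarTypeRegistry is what allows a framework Variable to hold a CSR tensor, which the IsSparseCsrTensorInput overrides elsewhere in this PR query via IsType. A rough usage sketch, assuming the existing Variable API (GetMutable/IsType); the function name is illustrative:

#include "paddle/fluid/framework/variable.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

// Rough sketch (illustrative): once the type is registered, a Variable can be
// materialized as a SparseCsrTensor, and the argument-mapping contexts can
// detect it through IsType<phi::SparseCsrTensor>().
void HoldCsrTensor(paddle::framework::Variable* var) {
  phi::SparseCsrTensor* csr = var->GetMutable<phi::SparseCsrTensor>();
  (void)csr;  // crows/cols/values would be filled by a kernel
  bool is_csr = var->IsType<phi::SparseCsrTensor>();  // true after GetMutable
  (void)is_csr;
}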
4 changes: 4 additions & 0 deletions paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.cc
@@ -112,6 +112,10 @@ bool PluginArgumentMappingContext::IsSparseCooTensorInput(
const std::string& name) const {
return false;
}
bool PluginArgumentMappingContext::IsSparseCsrTensorInput(
const std::string& name) const {
return false;
}
bool PluginArgumentMappingContext::IsDenseTensorVectorInput(
const std::string& name) const {
return false;
2 changes: 2 additions & 0 deletions paddle/fluid/inference/tensorrt/plugin_arg_mapping_context.h
@@ -50,6 +50,8 @@ class PluginArgumentMappingContext : public ::phi::ArgumentMappingContext {

bool IsSparseCooTensorInput(const std::string& name) const override;

bool IsSparseCsrTensorInput(const std::string& name) const override;

bool IsDenseTensorVectorInput(const std::string& name) const override;

bool IsDenseTensorOutput(const std::string& name) const override;
2 changes: 1 addition & 1 deletion paddle/fluid/operators/CMakeLists.txt
@@ -101,7 +101,7 @@ else()
cc_library(gather_scatter_kernel SRCS gather_scatter_kernel.cc gather_scatter_kernel.cu DEPS tensor)
endif()

set(OP_HEADER_DEPS ${OP_HEADER_DEPS} phi phi_api_utils gather_scatter_kernel backward_infermeta)
set(OP_HEADER_DEPS ${OP_HEADER_DEPS} phi phi_api_utils gather_scatter_kernel backward_infermeta sparse_backward_infermeta)

register_operators(EXCLUDES py_layer_op py_func_op warpctc_op dgc_op load_combine_op lstm_op run_program_op eye_op quantize_linear_op
recurrent_op save_combine_op sparse_attention_op sync_batch_norm_op ${OP_MKL_DEPS} DEPS ${OP_HEADER_DEPS})
214 changes: 0 additions & 214 deletions paddle/fluid/operators/sparse_manual_op.cc
@@ -28,50 +28,6 @@ limitations under the License. */
namespace paddle {
namespace operators {

class SparseSparseCooTensorOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("values", "(Tensor), input 0 of sparse_coo_tensor op.");
AddInput("indices", "(Tensor), input 1 of sparse_coo_tensor op.");
AddOutput("out", "(Tensor), output 0 of sparse_coo_tensor op.");
AddAttr<std::vector<int>>(
"dense_shape", "(vector<int>), attribute 0 for sparse_coo_tensor op.");
AddComment(R"DOC(
TODO: Documentation of sparse_coo_tensor op.
)DOC");
}
};

class SparseSparseCooTensorOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(
sparse_sparse_coo_tensor,
SparseSparseCooTensorInferShapeFunctor,
PD_INFER_META(phi::sparse::SparseCooTensorInferMeta));

class SparseValuesOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("x", "(Tensor), input 0 of sparse_values op.");
AddOutput("out", "(Tensor), output 0 of sparse_values op.");
AddComment(R"DOC(
TODO: Documentation of sparse_values op.
)DOC");
}
};

class SparseValuesOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_values,
SparseValuesInferShapeFunctor,
PD_INFER_META(phi::sparse::ValuesInferMeta));

class SparseIndicesOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
@@ -92,182 +48,12 @@ DECLARE_INFER_SHAPE_FUNCTOR(sparse_indices,
SparseIndicesInferShapeFunctor,
PD_INFER_META(phi::sparse::IndicesInferMeta));

class SparseToDenseOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("x", "(Tensor), input 0 of sparse_to_dense op.");
AddOutput("out", "(Tensor), output 0 of sparse_to_dense op.");
AddComment(R"DOC(
TODO: Documentation of sparse_to_dense op.
)DOC");
}
};

class SparseToDenseOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_to_dense,
SparseToDenseInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));

class SparseReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("x", "(Tensor), input 0 of sparse_relu op.");
AddOutput("out", "(Tensor), output 0 of sparse_relu op.");
AddComment(R"DOC(
TODO: Documentation of sparse_relu op.
)DOC");
}
};

class SparseReluOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_relu,
SparseReluInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));

class SparseConv3dOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("x", "(Tensor), input 0 of sparse_conv3d op.");
AddInput("kernel", "(Tensor), input 1 of sparse_conv3d op.");
AddOutput("out", "(Tensor), output 0 of sparse_conv3d op.");
AddOutput("rulebook", "(Tensor), output 1 of sparse_conv3d op.");
AddOutput("counter", "(Tensor), output 2 of sparse_conv3d op.");
AddAttr<std::vector<int>>(
"paddings", "(vector<int>), attribute 0 for sparse_conv3d op.");
AddAttr<std::vector<int>>(
"dilations", "(vector<int>), attribute 1 for sparse_conv3d op.");
AddAttr<std::vector<int>>(
"strides", "(vector<int>), attribute 2 for sparse_conv3d op.");
AddAttr<int>("groups", "(int), attribute 3 for sparse_conv3d op.");
AddAttr<bool>("subm", "(bool), attribute 4 for conv3d_coo op.");
AddAttr<std::string>("key", "(string), attribute 5 for sparse_conv3d op.")
.SetDefault("");
AddComment(R"DOC(
TODO: Documentation of sparse_conv3d op.
)DOC");
}
};

class SparseConv3dOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_conv3d,
SparseConv3dInferShapeFunctor,
PD_INFER_META(phi::sparse::Conv3dInferMeta));

class SparseAddOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("x", "(Tensor), input 0 of sparse_add op.");
AddInput("y", "(Tensor), input 1 of sparse_add op.");
AddOutput("out", "(Tensor), output 0 of sparse_add op.");
AddComment(R"DOC(
TODO: Documentation of sparse_add op.
)DOC");
}
};

class SparseAddOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_add,
SparseAddInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));

class SparseBatchNormOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("x", "(Tensor), input 0 of sparse_batch_norm op.");
AddInput("scale", "(Tensor), input 1 of sparse_batch_norm op.");
AddInput("bias", "(Tensor), input 2 of sparse_batch_norm op.");
AddInput("mean", "(Tensor), input 3 of sparse_batch_norm op.");
AddInput("variance", "(Tensor), input 4 of sparse_batch_norm op.");
AddOutput("y", "(Tensor), output 0 of sparse_batch_norm op.");
AddOutput("mean_out", "(Tensor), output 1 of sparse_batch_norm op.");
AddOutput("variance_out", "(Tensor), output 2 of sparse_batch_norm op.");
AddOutput("saved_mean", "(Tensor), output 3 of sparse_batch_norm op.");
AddOutput("saved_variance", "(Tensor), output 4 of sparse_batch_norm op.");
AddOutput("reserve_space", "(Tensor), output 5 of sparse_batch_norm op.");
AddAttr<float>("momentum",
"(float), attribute 0 for sparse_batch_norm op.");
AddAttr<float>("epsilon", "(float), attribute 1 for sparse_batch_norm op.");
AddAttr<std::string>("data_layout",
"(string), attribute 2 for sparse_batch_norm op.");
AddAttr<bool>("is_test", "(bool), attribute 3 for sparse_batch_norm op.");
AddAttr<bool>("use_global_stats",
"(bool), attribute 4 for sparse_batch_norm op.");
AddAttr<bool>("trainable_statistics",
"(bool), attribute 4 for sparse_batch_norm op.");
AddAttr<bool>("fuse_with_relu",
"(bool), attribute 4 for sparse_batch_norm op.");
AddComment(R"DOC(
TODO: Documentation of sparse_batch_norm op.
)DOC");
}
};

class SparseBatchNormOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_batch_norm,
SparseBatchNormInferShapeFunctor,
PD_INFER_META(phi::BatchNormInferMeta));

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(sparse_sparse_coo_tensor,
ops::SparseSparseCooTensorOp,
ops::SparseSparseCooTensorOpMaker,
ops::SparseSparseCooTensorInferShapeFunctor);

REGISTER_OPERATOR(sparse_values,
ops::SparseValuesOp,
ops::SparseValuesOpMaker,
ops::SparseValuesInferShapeFunctor);

REGISTER_OPERATOR(sparse_indices,
ops::SparseIndicesOp,
ops::SparseIndicesOpMaker,
ops::SparseIndicesInferShapeFunctor);

REGISTER_OPERATOR(sparse_to_dense,
ops::SparseToDenseOp,
ops::SparseToDenseOpMaker,
ops::SparseToDenseInferShapeFunctor);

REGISTER_OPERATOR(sparse_relu,
ops::SparseReluOp,
ops::SparseReluOpMaker,
ops::SparseReluInferShapeFunctor);

REGISTER_OPERATOR(sparse_conv3d,
ops::SparseConv3dOp,
ops::SparseConv3dOpMaker,
ops::SparseConv3dInferShapeFunctor);

REGISTER_OPERATOR(sparse_add,
ops::SparseAddOp,
ops::SparseAddOpMaker,
ops::SparseAddInferShapeFunctor);

REGISTER_OPERATOR(sparse_batch_norm,
ops::SparseBatchNormOp,
ops::SparseBatchNormOpMaker,
ops::SparseBatchNormInferShapeFunctor);
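The hand-written makers and registrations removed above are now emitted by the sparse op code generator into paddle/fluid/operators/generated_sparse_op.cc (see the .gitignore change at the top of this diff). For orientation, the generated code follows the same three-part structure as the deleted hand-written code; the condensed sketch below uses a placeholder op name, sparse_foo, and sits inside namespace paddle::operators like the original file:

// Condensed, illustrative pattern (placeholder name "sparse_foo"): an OpMaker
// describing the interface, an OperatorWithKernel subclass, an infer-shape
// functor bound to a phi InferMeta function, and a REGISTER_OPERATOR call.
class SparseFooOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("x", "(Tensor), input 0 of sparse_foo op.");
    AddOutput("out", "(Tensor), output 0 of sparse_foo op.");
    AddComment(R"DOC(Documentation of sparse_foo op.)DOC");
  }
};

class SparseFooOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};

DECLARE_INFER_SHAPE_FUNCTOR(sparse_foo,
                            SparseFooInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));

REGISTER_OPERATOR(sparse_foo,
                  ops::SparseFooOp,
                  ops::SparseFooOpMaker,
                  ops::SparseFooInferShapeFunctor);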
5 changes: 5 additions & 0 deletions paddle/fluid/pybind/eager_legacy_op_function_generator.cc
@@ -416,6 +416,11 @@ GenerateOpFunctions() {
if (CUSTOM_HANDWRITE_OPS_SET.count(op_type)) {
continue;
}
// Skip the sparse op
if (op_type.compare(0, 7, "sparse_") == 0 && op_type != "sparse_momentum" &&
op_type != "sparse_attention") {
Contributor Author commented on lines +420 to +421:
[TODO] Filtering sparse ops here with a hard-coded prefix check is not robust enough; if this intermediate-state code cannot be removed in the near term, this logic should be refined.

continue;
}
// Skip operator which is not inherit form OperatorWithKernel, like while,
// since only OperatorWithKernel can run in dygraph mode.
// if the phi lib contains op kernel, we still generate ops method
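On the reviewer's TODO above: the prefix check is duplicated between eager_generator.cc and this file, and its correctness depends on remembering the dense ops whose names happen to start with sparse_. One possible refinement, purely illustrative and not part of this PR, is a single shared predicate with an explicit allow-list so both generators stay in sync:

#include <string>
#include <unordered_set>

// Hypothetical shared helper (names are illustrative): keeps the "dense ops
// that merely share the sparse_ prefix" allow-list in exactly one place.
inline bool IsGeneratedSparseOp(const std::string& op_type) {
  static const std::unordered_set<std::string> kDenseOpsWithSparsePrefix = {
      "sparse_momentum", "sparse_attention"};
  return op_type.compare(0, 7, "sparse_") == 0 &&
         kDenseOpsWithSparsePrefix.count(op_type) == 0;
}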