Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix_default_value
jim19930609 committed Mar 4, 2022
2 parents e751306 + a694799 commit a82f5fc
Showing 25 changed files with 275 additions and 143 deletions.
paddle/fluid/eager/auto_code_generator/final_state_generator/eager_gen.py
@@ -26,14 +26,14 @@

yaml_types_mapping = {
    'int' : 'int', 'int32_t' : 'int32_t', 'int64_t' : 'int64_t', 'size_t' : 'size_t', \
-    'float' : 'float', 'double' : 'double', 'bool' : 'bool', \
-    'Backend' : 'Backend', 'DataLayout' : 'DataLayout', 'DataType' : 'DataType', \
-    'int64_t[]' : 'std::vector<int64_t>', 'int[]' : 'std::vector<int>',
+    'float' : 'float', 'double' : 'double', 'bool' : 'bool', \
+    'Backend' : 'paddle::experimental::Backend', 'DataLayout' : 'paddle::experimental::DataLayout', 'DataType' : 'paddle::experimental::DataType', \
+    'int64_t[]' : 'std::vector<int64_t>', 'int[]' : 'std::vector<int>',
    'Tensor' : 'Tensor',
    'Tensor[]' : 'std::vector<Tensor>',
    'Tensor[Tensor[]]' : 'std::vector<std::vector<Tensor>>',
-    'Scalar' : 'Scalar',
-    'ScalarArray' : 'ScalarArray'
+    'Scalar' : 'paddle::experimental::Scalar',
+    'ScalarArray' : 'paddle::experimental::ScalarArray'
}
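With the mapping now emitting fully qualified paddle::experimental names, the generated C++ compiles without relying on using-declarations for those types. A minimal sketch of how the table is consumed downstream (the table is trimmed here, and to_cxx_type is an illustrative helper, not a function from the generator):

yaml_types_mapping_trimmed = {
    'Scalar': 'paddle::experimental::Scalar',
    'ScalarArray': 'paddle::experimental::ScalarArray',
    'int[]': 'std::vector<int>',
}

def to_cxx_type(yaml_type):
    # Fails loudly on unknown YAML types, like the generator's asserts.
    assert yaml_type in yaml_types_mapping_trimmed, f"unknown YAML type: {yaml_type}"
    return yaml_types_mapping_trimmed[yaml_type]

print(to_cxx_type('Scalar'))  # paddle::experimental::Scalar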


@@ -208,39 +208,26 @@ def ParseYamlArgs(string):


def ParseYamlReturns(string):
-    # Example: Tensor, Tensor
-
-    # list = [ ["", ret_type, orig_position], ...]
-    returns_list = []
-
-    returns = [x.strip() for x in string.strip().split(",")]
-    for i in range(len(returns)):
-        ret_type = returns[i]
-
-        assert ret_type in yaml_types_mapping.keys()
-        ret_type = yaml_types_mapping[ret_type]
-
-        returns_list.append(["", ret_type, i])
-
-    return returns_list
-
-
-def ParseYamlReturnsWithName(string):
-    # Example: Tensor(out), Tensor(out1)
+    # Example0: Tensor(out), Tensor(out1)
+    # Example1: Tensor, Tensor
+    # Example2: Tensor[](out), Tensor

    # list = [ [ret_name, ret_type, orig_position], ...]
    returns_list = []

    returns = [x.strip() for x in string.strip().split(",")]

-    atype = r'(.*?)'
-    aname = r'(.*?)'
-    pattern = f'{atype}\({aname}\)'
    for i in range(len(returns)):
        ret = returns[i]
-        m = re.search(pattern, ret)
-        ret_type = m.group(1)
-        ret_name = m.group(2)
+
+        ret_name = ""
+        if "(" in ret and ")" in ret:
+            # Remove trailing ')'
+            ret = ret[:-1]
+            ret_type = ret.split("(")[0].strip()
+            ret_name = ret.split("(")[1].strip()
+        else:
+            ret_type = ret.strip()

        assert ret_type in yaml_types_mapping.keys()
        ret_type = yaml_types_mapping[ret_type]
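The merged parser above now handles named, unnamed, and list-typed returns in a single pass. A self-contained sketch of the new logic, runnable as-is with a trimmed copy of the mapping, showing what each example form from the comments produces:

yaml_types_mapping = {'Tensor': 'Tensor', 'Tensor[]': 'std::vector<Tensor>'}

def ParseYamlReturns(string):
    returns_list = []
    returns = [x.strip() for x in string.strip().split(",")]
    for i, ret in enumerate(returns):
        ret_name = ""
        if "(" in ret and ")" in ret:
            ret = ret[:-1]                        # drop the trailing ')'
            ret_type = ret.split("(")[0].strip()
            ret_name = ret.split("(")[1].strip()
        else:
            ret_type = ret.strip()
        assert ret_type in yaml_types_mapping
        returns_list.append([ret_name, yaml_types_mapping[ret_type], i])
    return returns_list

print(ParseYamlReturns("Tensor(out), Tensor(out1)"))  # [['out', 'Tensor', 0], ['out1', 'Tensor', 1]]
print(ParseYamlReturns("Tensor, Tensor"))             # [['', 'Tensor', 0], ['', 'Tensor', 1]]
print(ParseYamlReturns("Tensor[](out), Tensor"))      # [['out', 'std::vector<Tensor>', 0], ['', 'Tensor', 1]]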
@@ -266,7 +253,7 @@ def ParseYamlForwardFromBackward(string):
    function_returns = m.group(3)

    forward_inputs_list, forward_attrs_list = ParseYamlArgs(function_args)
-    forward_returns_list = ParseYamlReturnsWithName(function_returns)
+    forward_returns_list = ParseYamlReturns(function_returns)

    return forward_inputs_list, forward_attrs_list, forward_returns_list

@@ -296,7 +283,7 @@ def ParseYamlBackward(args_str, returns_str):
    args_str = re.search(args_pattern, args_str).group(1)

    inputs_list, attrs_list = ParseYamlArgs(args_str)
-    returns_list = ParseYamlReturnsWithName(returns_str)
+    returns_list = ParseYamlReturns(returns_str)

    return inputs_list, attrs_list, returns_list

paddle/fluid/eager/auto_code_generator/final_state_generator/python_c_gen.py
@@ -16,20 +16,26 @@
import argparse
from eager_gen import yaml_types_mapping, ReadFwdFile, ParseDispensable, IsVectorTensorType, GetForwardFunctionName, ParseYamlForward, DetermineForwardPositionMap

+skipped_fwd_api_names = set(["scale"])
+
atype_to_parsing_function = {
    "bool": "CastPyArg2Boolean",
    "int": "CastPyArg2Int",
    "long": "CastPyArg2Long",
+    "int64_t": "CastPyArg2Long",
    "float": "CastPyArg2Float",
    "string": "CastPyArg2String",
-    "bool[]": "CastPyArg2Booleans",
-    "int[]": "CastPyArg2Ints",
-    "long[]": "CastPyArg2Longs",
-    "float[]": "CastPyArg2Floats",
-    "double[]": "CastPyArg2Float64s",
-    "string[]": "CastPyArg2Strings",
-    "Scalar": "CastPyArg2Scalar",
-    "ScalarArray": "CastPyArg2ScalarArray"
+    "std::vector<bool>": "CastPyArg2Booleans",
+    "std::vector<int>": "CastPyArg2Ints",
+    "std::vector<long>": "CastPyArg2Longs",
+    "std::vector<int64_t>": "CastPyArg2Longs",
+    "std::vector<float>": "CastPyArg2Floats",
+    "std::vector<double>": "CastPyArg2Float64s",
+    "std::vector<std::string>": "CastPyArg2Strings",
+    "paddle::experimental::Scalar": "CastPyArg2Scalar",
+    "paddle::experimental::ScalarArray": "CastPyArg2ScalarArray",
+    "paddle::experimental::Backend": "CastPyArg2Backend",
+    "paddle::experimental::DataType": "CastPyArg2DataType",
}
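Because yaml_types_mapping is applied while the YAML is parsed, attribute types reach this file already as C++ type strings; that is why the keys above switched to C++ types and why GetCxxType becomes redundant (it is deleted below). A sketch of the lookup together with the parse line it ultimately emits (the op and attribute are made up for illustration):

atype_to_parsing_function_trimmed = {
    "int": "CastPyArg2Int",
    "paddle::experimental::Scalar": "CastPyArg2Scalar",
}

def FindParsingFunctionFromAttributeType(atype):
    assert atype in atype_to_parsing_function_trimmed, f"Unable to find {atype}"
    return atype_to_parsing_function_trimmed[atype]

# A hypothetical int attribute "axis" of a hypothetical op "my_op":
name, atype, pos, fwd_api_name = "axis", "int", 1, "my_op"
parsing_function = FindParsingFunctionFromAttributeType(atype)
print(f" {atype} {name} = {parsing_function}({name}_obj, \"{fwd_api_name}\", {pos});")
# ->  int axis = CastPyArg2Int(axis_obj, "my_op", 1);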


@@ -43,23 +49,17 @@ def ParseArguments():
return args


-def GetCxxType(atype):
-    if atype not in yaml_types_mapping.keys():
-        assert False
-
-    return yaml_types_mapping[atype]
-
-
def FindParsingFunctionFromAttributeType(atype):
if atype not in atype_to_parsing_function.keys():
print(f"Unable to find {atype} in atype_to_parsing_function.")
assert False

return atype_to_parsing_function[atype]


def GeneratePythonCFunction(fwd_api_name, forward_inputs_position_map,
                            forward_attrs_list, forward_outputs_position_map,
-                            optional_inputs):
+                            optional_inputs, is_forward_only):
# forward_inputs_position_map = { "name" : [type, fwd_position] }
# forward_outputs_position_map = { "name" : [type, fwd_position] }
# forward_attrs_list = [ [attr_name, attr_type, default_value, orig_position], ...]
@@ -86,11 +86,10 @@ def GeneratePythonCFunction(fwd_api_name, forward_inputs_position_map,
    # Get Attributes
    for name, atype, _, pos in forward_attrs_list:
        parsing_function = FindParsingFunctionFromAttributeType(atype)
-        cxx_type = GetCxxType(atype)
        key = f"{name}"

        parse_attributes_str += f" PyObject* {name}_obj = PyTuple_GET_ITEM(args, {pos});\n"
-        parse_attributes_str += f" {cxx_type} {name} = {parsing_function}({name}_obj, \"{fwd_api_name}\", {pos});\n"
+        parse_attributes_str += f" {atype} {name} = {parsing_function}({name}_obj, \"{fwd_api_name}\", {pos});\n"

        dygraph_function_call_list[pos] = f"{name}"
        dygraph_function_call_str = ",".join(dygraph_function_call_list)
@@ -127,9 +126,14 @@ def GeneratePythonCFunction(fwd_api_name, forward_inputs_position_map,
}}
"""
+    if is_forward_only:
+        fwd_function_name = fwd_api_name
+    else:
+        fwd_function_name = GetForwardFunctionName(fwd_api_name)
+
    python_c_function_str = PYTHON_C_FUNCTION_TEMPLATE.format(
        fwd_api_name, fwd_api_name, get_eager_tensor_str, parse_attributes_str,
-        GetForwardFunctionName(fwd_api_name), dygraph_function_call_str)
+        fwd_function_name, dygraph_function_call_str)

python_c_function_reg_str = f"{{\"final_state_{fwd_api_name}\", (PyCFunction)(void(*)(void))eager_final_state_api_{fwd_api_name}, METH_VARARGS | METH_KEYWORDS, \"C++ interface function for {fwd_api_name} in dygraph.\"}}\n"
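The effect of the new branch: forward-only APIs bind the Python-C function straight to the C++ API symbol, while APIs with a backward keep going through the generated dygraph wrapper. A compressed sketch of just that choice (the GetForwardFunctionName stub below is a stand-in for the helper imported from eager_gen; its real naming scheme lives there):

def GetForwardFunctionName(name):
    # Stand-in; an assumption for this sketch, not the actual helper.
    return f"{name}_dygraph_function"

def choose_called_function(fwd_api_name, is_forward_only):
    if is_forward_only:
        return fwd_api_name                      # call the C++ API directly
    return GetForwardFunctionName(fwd_api_name)  # call the generated wrapper

print(choose_called_function("sign", True))     # sign
print(choose_called_function("matmul", False))  # matmul_dygraph_function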

@@ -213,6 +217,11 @@ def GeneratePythonCWrappers(python_c_function_str, python_c_function_reg_str):
#pragma once
#include "pybind11/detail/common.h"
#include "paddle/phi/api/all.h"
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/fluid/pybind/op_function_common.h"
#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/pybind/exception.h"
@@ -251,19 +260,23 @@ def GeneratePythonCFile(filepath, python_c_str):
python_c_function_list = []
python_c_function_reg_list = []
    for fwd_api in fwd_api_list:
-
        # We only generate Ops with grad
+        is_forward_only = False
        if 'backward' not in fwd_api.keys():
-            continue
+            is_forward_only = True

        assert 'api' in fwd_api.keys()
        assert 'args' in fwd_api.keys()
        assert 'output' in fwd_api.keys()
-        assert 'backward' in fwd_api.keys()

        fwd_api_name = fwd_api['api']
        fwd_args_str = fwd_api['args']
        fwd_returns_str = fwd_api['output']

+        if fwd_api_name in skipped_fwd_api_names:
+            continue

# Parse Dispensable Inputs
optional_inputs = []
if 'optional' in fwd_api.keys():
@@ -285,7 +298,7 @@ def GeneratePythonCFile(filepath, python_c_str):

python_c_function_str, python_c_function_reg_str = GeneratePythonCFunction(
fwd_api_name, forward_inputs_position_map, forward_attrs_list,
-            forward_outputs_position_map, optional_inputs)
+            forward_outputs_position_map, optional_inputs, is_forward_only)
python_c_function_list.append(python_c_function_str)
python_c_function_reg_list.append(python_c_function_reg_str)
print("Generated Python-C Function: ", python_c_function_str)
47 changes: 43 additions & 4 deletions paddle/fluid/pybind/eager_utils.cc
@@ -757,7 +757,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
if (obj == Py_None) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument (position %d) must be "
"bool, but got %s",
"int, float, bool or Tensor, but got %s",
op_type, arg_pos + 1,
((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT
}
@@ -784,7 +784,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument (position %d) must be "
"bool, but got %s",
"int, float, bool or Tensor, but got %s",
op_type, arg_pos + 1,
((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT
}
@@ -801,7 +801,7 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
if (obj == Py_None) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument (position %d) must be "
"bool, but got %s",
"list or Tensor, but got %s",
op_type, arg_pos + 1,
((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT
}
@@ -821,7 +821,7 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument (position %d) must be "
"bool, but got %s",
"list or Tensor, but got %s",
op_type, arg_pos + 1,
((PyTypeObject*)obj->ob_type)->tp_name)); // NOLINT
}
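The reworded messages now state the actual contract: a Scalar attribute can be built from a Python int, float, bool, or Tensor, and a ScalarArray from a list or Tensor. A rough Python-level illustration of that contract (illustrative stand-ins only, not Paddle code):

def check_scalar_like(obj):
    # Mirrors the kinds named in the corrected Scalar message.
    if isinstance(obj, (bool, int, float)) or type(obj).__name__ == "Tensor":
        return True
    raise TypeError(f"must be int, float, bool or Tensor, but got {type(obj).__name__}")

def check_scalar_array_like(obj):
    # Mirrors the kinds named in the corrected ScalarArray message.
    if isinstance(obj, list) or type(obj).__name__ == "Tensor":
        return True
    raise TypeError(f"must be list or Tensor, but got {type(obj).__name__}")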
@@ -830,5 +830,44 @@ paddle::experimental::ScalarArray CastPyArg2ScalarArray(
return paddle::experimental::ScalarArray({1});
}

+paddle::experimental::Backend CastPyArg2Backend(PyObject* obj,
+                                                const std::string& op_type,
+                                                ssize_t arg_pos) {
+  if (obj == Py_None) {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "int or place, but got %s",
+        op_type, arg_pos + 1,
+        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
+  }
+
+  PyTypeObject* type = obj->ob_type;
+  auto type_name = std::string(type->tp_name);
+  if (type_name == "int") {
+    int value = CastPyArg2Int(obj, op_type, arg_pos);
+    return static_cast<paddle::experimental::Backend>(value);
+  } else {
+    platform::Place place = CastPyArg2Place(obj, arg_pos);
+    return phi::TransToPhiBackend(place);
+  }
+
+  return paddle::experimental::Backend::CPU;
+}
+
+paddle::experimental::DataType CastPyArg2DataType(PyObject* obj,
+                                                  const std::string& op_type,
+                                                  ssize_t arg_pos) {
+  if (obj == Py_None) {
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "%s(): argument (position %d) must be "
+        "data_type, but got %s",
+        op_type, arg_pos + 1,
+        ((PyTypeObject*)obj->ob_type)->tp_name));  // NOLINT
+  }
+
+  framework::proto::VarType::Type type = CastPyArg2ProtoType(obj, arg_pos);
+  return framework::TransToPhiDataType(type);
+}

} // namespace pybind
} // namespace paddle
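These two casts are the runtime counterparts of the new paddle::experimental::Backend and paddle::experimental::DataType entries in atype_to_parsing_function: once an API declares such an attribute, the generator's attribute template expands to lines like the following (op and attribute names are made up for illustration):

table = {"paddle::experimental::Backend": "CastPyArg2Backend",
         "paddle::experimental::DataType": "CastPyArg2DataType"}
for name, atype, pos in [("place", "paddle::experimental::Backend", 4),
                         ("dtype", "paddle::experimental::DataType", 5)]:
    print(f" {atype} {name} = {table[atype]}({name}_obj, \"my_op\", {pos});")
# ->  paddle::experimental::Backend place = CastPyArg2Backend(place_obj, "my_op", 4);
# ->  paddle::experimental::DataType dtype = CastPyArg2DataType(dtype_obj, "my_op", 5);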
10 changes: 10 additions & 0 deletions paddle/fluid/pybind/eager_utils.h
@@ -11,6 +11,8 @@ limitations under the License. */
#pragma once

#include <Python.h>
#include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/common/scalar_array.h"
#include "paddle/phi/core/dense_tensor.h"
@@ -100,6 +102,14 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
paddle::experimental::ScalarArray CastPyArg2ScalarArray(
PyObject* obj, const std::string& op_type, ssize_t arg_pos);

+paddle::experimental::Backend CastPyArg2Backend(PyObject* obj,
+                                                const std::string& op_type,
+                                                ssize_t arg_pos);
+
+paddle::experimental::DataType CastPyArg2DataType(PyObject* obj,
+                                                  const std::string& op_type,
+                                                  ssize_t arg_pos);

paddle::optional<paddle::experimental::Tensor> GetOptionalTensorFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
2 changes: 1 addition & 1 deletion paddle/infrt/host_context/value.h
@@ -70,7 +70,7 @@ using ValueVariantType =
backends::CpuPhiAllocator,
backends::CpuPhiContext,
::phi::CPUContext,
-    std::vector<phi::DenseTensor>,
+    std::vector<const phi::DenseTensor*>,
paddle::experimental::ScalarBase<phi::DenseTensor>,
paddle::experimental::ScalarArrayBase<phi::DenseTensor>,
std::vector<phi::MetaTensor*>,
6 changes: 3 additions & 3 deletions paddle/phi/api/lib/api_gen_utils.cc
@@ -71,11 +71,11 @@ paddle::optional<phi::MetaTensor> MakeMetaTensor(
}

std::vector<phi::MetaTensor> MakeMetaTensor(
-    const std::vector<phi::DenseTensor>& tensors) {
+    const std::vector<const phi::DenseTensor*>& tensors) {
  std::vector<phi::MetaTensor> meta_tensors;
  meta_tensors.reserve(tensors.size());
-  for (const auto& t : tensors) {
-    meta_tensors.emplace_back(t);
+  for (const auto* t : tensors) {
+    meta_tensors.emplace_back(*t);
}
return meta_tensors;
}
2 changes: 1 addition & 1 deletion paddle/phi/api/lib/api_gen_utils.h
@@ -51,7 +51,7 @@ paddle::optional<phi::MetaTensor> MakeMetaTensor(
const paddle::optional<const phi::DenseTensor&>& tensor);

std::vector<phi::MetaTensor> MakeMetaTensor(
-    const std::vector<phi::DenseTensor>& tensors);
+    const std::vector<const phi::DenseTensor*>& tensors);

phi::MetaTensor MakeMetaTensor(const phi::SelectedRows& tensor);

9 changes: 4 additions & 5 deletions paddle/phi/core/kernel_context.h
@@ -82,12 +82,11 @@ class KernelContext {
}

template <typename TensorType>
-  std::vector<TensorType> MoveInputsBetween(size_t start, size_t end) {
-    std::vector<TensorType> v;
+  std::vector<const TensorType*> InputsBetween(size_t start, size_t end) {
+    std::vector<const TensorType*> v;
    for (size_t i = start; i < end; ++i) {
-      auto t = static_cast<const TensorType*>(inputs_.at(i));
-      v.emplace_back(*t);
-      inputs_[i] = nullptr;
+      auto* t = static_cast<const TensorType*>(inputs_.at(i));
+      v.emplace_back(t);
}
return v;
}
4 changes: 2 additions & 2 deletions paddle/phi/core/kernel_registry.h
@@ -87,8 +87,8 @@ struct KernelArgsParseFunctor<Return_ (*)(Args_...)> {
default_tensor_layout,
default_key.dtype(),
arg_type);
-    } else if (arg_type ==
-               std::type_index(typeid(const std::vector<DenseTensor>&))) {
+    } else if (arg_type == std::type_index(typeid(
+                               const std::vector<const DenseTensor*>&))) {
args_def->AppendInput(default_key.backend(),
default_tensor_layout,
default_key.dtype(),

1 comment on commit a82f5fc

@paddle-bot-old

Congratulations! Your pull request passed all required CI. You can ask reviewer(s) to approve and merge. 🎉
