[v1.x] Add onnx export operator unit tests. (#20192)
* Add onnx operator export unit tests for reciprocal, power, broadcast_power, sqrt, depth_to_space and space_to_depth operators.

* Add onnx operator export unit test for square operator.

* Update export operators for broadcast_logical_and, broadcast_logical_or, broadcast_logical_xor.

* Add onnx export unit tests for shape_array, hard_sigmoid, broadcast_lesser, broadcast_greater, log_softmax, broadcast_logical_and, broadcast_logical_or, broadcast_logical_xor.

* Fix logical_not export by casting, add export unit tests for broadcast_to and logical_not.

* Fix random_uniform and random_uniform_like to use proper data types and add unit tests.

* Onnxruntime does not support float64 dtype for reciprocal.

* Fix random_normal export function to include output dtype, add operator export unit test.

* Fix syntax.

* Fix pylint, add operator unit test for ROIPooling.
josephevans authored Apr 21, 2021
1 parent cdb901a commit 94fe808
Showing 2 changed files with 293 additions and 17 deletions.
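The new operator unit tests live in the second changed file, which is not expanded below. As an illustrative sketch only (not code from this commit), a round-trip check for one of these exports, assuming MXNet 1.x's `mx.contrib.onnx.export_model` API and onnxruntime, might look like this; the file name, input values, and tolerances are hypothetical:

```python
import mxnet as mx
import numpy as np
import onnxruntime

# Hypothetical round-trip check for the logical_not export; names and
# values here are illustrative, not taken from the commit's test file.
x = mx.nd.array([[-1.5, 0.0, 2.0]], dtype='float32')
sym = mx.sym.logical_not(mx.sym.Variable('data'))
params = {}  # logical_not has no learnable parameters

onnx_file = mx.contrib.onnx.export_model(sym, params, [x.shape], np.float32,
                                          'logical_not.onnx')

sess = onnxruntime.InferenceSession(onnx_file)
input_name = sess.get_inputs()[0].name
onnx_out = sess.run(None, {input_name: x.asnumpy()})[0]

expected = mx.nd.logical_not(x).asnumpy()
np.testing.assert_allclose(onnx_out, expected, rtol=1e-5, atol=1e-5)
```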
@@ -2273,34 +2273,81 @@ def convert_broadcast_equal(node, **kwargs):
 
 @mx_op.register("broadcast_logical_and")
 def convert_broadcast_logical_and(node, **kwargs):
-    """Map MXNet's broadcast logical and operator attributes to onnx's Add operator
+    """Map MXNet's broadcast logical and operator attributes to onnx's And operator
     and return the created node.
     """
-    return create_basic_op_node('And', node, kwargs)
+    from onnx.helper import make_node
+    from onnx import TensorProto
+    name, input_nodes, _ = get_inputs(node, kwargs)
+    input_dtypes = get_input_dtypes(node, kwargs)
+    dtype = input_dtypes[0]
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
+    nodes = [
+        make_node("Cast", [input_nodes[0]], [name+"_cast0"], to=int(TensorProto.BOOL)),
+        make_node("Cast", [input_nodes[1]], [name+"_cast1"], to=int(TensorProto.BOOL)),
+        make_node("And", [name+"_cast0", name+"_cast1"], [name+"_and"]),
+        make_node("Cast", [name+"_and"], [name], name=name, to=int(dtype_t))
+    ]
+    return nodes


@mx_op.register("broadcast_logical_or")
def convert_broadcast_logical_or(node, **kwargs):
"""Map MXNet's broadcast logical or operator attributes to onnx's Or operator
and return the created node.
"""
return create_basic_op_node('Or', node, kwargs)
from onnx.helper import make_node
from onnx import TensorProto
name, input_nodes, _ = get_inputs(node, kwargs)
input_dtypes = get_input_dtypes(node, kwargs)
dtype = input_dtypes[0]
dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
nodes = [
make_node("Cast", [input_nodes[0]], [name+"_cast0"], to=int(TensorProto.BOOL)),
make_node("Cast", [input_nodes[1]], [name+"_cast1"], to=int(TensorProto.BOOL)),
make_node("Or", [name+"_cast0", name+"_cast1"], [name+"_or"]),
make_node("Cast", [name+"_or"], [name], name=name, to=int(dtype_t))
]
return nodes


@mx_op.register("broadcast_logical_xor")
def convert_broadcast_logical_xor(node, **kwargs):
"""Map MXNet's broadcast logical xor operator attributes to onnx's Xor operator
and return the created node.
"""
return create_basic_op_node('Xor', node, kwargs)
from onnx.helper import make_node
from onnx import TensorProto
name, input_nodes, _ = get_inputs(node, kwargs)
input_dtypes = get_input_dtypes(node, kwargs)
dtype = input_dtypes[0]
dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
nodes = [
make_node("Cast", [input_nodes[0]], [name+"_cast0"], to=int(TensorProto.BOOL)),
make_node("Cast", [input_nodes[1]], [name+"_cast1"], to=int(TensorProto.BOOL)),
make_node("Xor", [name+"_cast0", name+"_cast1"], [name+"_xor"]),
make_node("Cast", [name+"_xor"], [name], name=name, to=int(dtype_t))
]
return nodes


@mx_op.register("logical_not")
def convert_logical_not(node, **kwargs):
"""Map MXNet's logical not operator attributes to onnx's Not operator
and return the created node.
"""
return create_basic_op_node('Not', node, kwargs)
from onnx.helper import make_node
from onnx import TensorProto
name, input_nodes, _ = get_inputs(node, kwargs)
input_dtypes = get_input_dtypes(node, kwargs)
dtype = input_dtypes[0]
dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
nodes = [
make_node("Cast", [input_nodes[0]], [name+"_cast"], to=int(TensorProto.BOOL)),
make_node("Not", [name+"_cast"], [name+"_not"]),
make_node("Cast", [name+"_not"], [name], name=name, to=int(dtype_t))
]
return nodes


@mx_op.register("size_array")
@@ -2346,6 +2393,9 @@ def convert_norm(node, **kwargs):
     keepdims = get_boolean_attribute_value(attrs, "keepdims")
     ord = int(attrs.get("ord", 2))
 
+    if ord not in [1, 2]:
+        raise AttributeError("norm export operator only supports ord=1 or ord=2.")
+
     onnx_op_name = "ReduceL1" if ord == 1 else "ReduceL2"
 
     if axes:
@@ -2396,25 +2446,26 @@ def convert_random_uniform(node, **kwargs):
     """Map MXNet's random_uniform operator attributes to onnx's RandomUniform
     operator and return the created node.
     """
-    name, input_nodes, attrs = get_inputs(node, kwargs)
+    name, _, attrs = get_inputs(node, kwargs)
 
     # Converting to float32
     low = float(attrs.get("low", 0))
     high = float(attrs.get("high", 1.0))
     shape = convert_string_to_list(attrs.get('shape', '[]'))
-    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]
+    dtype = np.dtype(attrs.get('dtype', 'float32'))
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
 
     node = onnx.helper.make_node(
         'RandomUniform',
-        input_nodes,
+        [],
         [name],
         low=low,
         high=high,
-        dtype=dtype,
+        dtype=dtype_t,
         shape=shape,
         name=name
     )
-    return [node]
+    return [node], (dtype,)


@mx_op.register("_random_normal")
@@ -2428,19 +2479,20 @@ def convert_random_normal(node, **kwargs):
     mean = float(attrs.get("loc", 0))
     scale = float(attrs.get("scale", 1.0))
     shape = convert_string_to_list(attrs.get('shape', '[]'))
-    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]
+    dtype = np.dtype(attrs.get('dtype', 'float32'))
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
 
     node = onnx.helper.make_node(
         'RandomNormal',
         input_nodes,
         [name],
         mean=mean,
         scale=scale,
-        dtype=dtype,
+        dtype=dtype_t,
         shape=shape,
         name=name
     )
-    return [node]
+    return [node], (dtype,)


@mx_op.register("ROIPooling")
@@ -4293,15 +4345,17 @@ def convert_random_uniform_like(node, **kwargs):
     """
     from onnx.helper import make_node
     name, input_nodes, attrs = get_inputs(node, kwargs)
+    input_dtypes = get_input_dtypes(node, kwargs)
 
+    dtype = input_dtypes[0]
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
+
     low = float(attrs.get('low', 0.))
     high = float(attrs.get('high', 1.))
-    dtype = attrs.get('dtype', 'float32')
 
     nodes = [
         make_node('RandomUniformLike', [input_nodes[0]], [name], name=name,
-                  dtype=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)],
-                  low=low, high=high)
+                  dtype=dtype_t, low=low, high=high)
     ]
 
     return nodes
