diff --git a/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py b/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
index 8f1114a6399e..8550b703e475 100644
--- a/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
+++ b/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
@@ -2273,10 +2273,22 @@ def convert_broadcast_equal(node, **kwargs):

 @mx_op.register("broadcast_logical_and")
 def convert_broadcast_logical_and(node, **kwargs):
-    """Map MXNet's broadcast logical and operator attributes to onnx's Add operator
+    """Map MXNet's broadcast logical and operator attributes to onnx's And operator
     and return the created node.
     """
-    return create_basic_op_node('And', node, kwargs)
+    from onnx.helper import make_node
+    from onnx import TensorProto
+    name, input_nodes, _ = get_inputs(node, kwargs)
+    input_dtypes = get_input_dtypes(node, kwargs)
+    dtype = input_dtypes[0]
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
+    nodes = [
+        make_node("Cast", [input_nodes[0]], [name+"_cast0"], to=int(TensorProto.BOOL)),
+        make_node("Cast", [input_nodes[1]], [name+"_cast1"], to=int(TensorProto.BOOL)),
+        make_node("And", [name+"_cast0", name+"_cast1"], [name+"_and"]),
+        make_node("Cast", [name+"_and"], [name], name=name, to=int(dtype_t))
+    ]
+    return nodes


 @mx_op.register("broadcast_logical_or")
@@ -2284,7 +2296,19 @@ def convert_broadcast_logical_or(node, **kwargs):
     """Map MXNet's broadcast logical or operator attributes to onnx's Or operator
     and return the created node.
     """
-    return create_basic_op_node('Or', node, kwargs)
+    from onnx.helper import make_node
+    from onnx import TensorProto
+    name, input_nodes, _ = get_inputs(node, kwargs)
+    input_dtypes = get_input_dtypes(node, kwargs)
+    dtype = input_dtypes[0]
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
+    nodes = [
+        make_node("Cast", [input_nodes[0]], [name+"_cast0"], to=int(TensorProto.BOOL)),
+        make_node("Cast", [input_nodes[1]], [name+"_cast1"], to=int(TensorProto.BOOL)),
+        make_node("Or", [name+"_cast0", name+"_cast1"], [name+"_or"]),
+        make_node("Cast", [name+"_or"], [name], name=name, to=int(dtype_t))
+    ]
+    return nodes


 @mx_op.register("broadcast_logical_xor")
@@ -2292,7 +2316,19 @@ def convert_broadcast_logical_xor(node, **kwargs):
     """Map MXNet's broadcast logical xor operator attributes to onnx's Xor operator
     and return the created node.
     """
-    return create_basic_op_node('Xor', node, kwargs)
+    from onnx.helper import make_node
+    from onnx import TensorProto
+    name, input_nodes, _ = get_inputs(node, kwargs)
+    input_dtypes = get_input_dtypes(node, kwargs)
+    dtype = input_dtypes[0]
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
+    nodes = [
+        make_node("Cast", [input_nodes[0]], [name+"_cast0"], to=int(TensorProto.BOOL)),
+        make_node("Cast", [input_nodes[1]], [name+"_cast1"], to=int(TensorProto.BOOL)),
+        make_node("Xor", [name+"_cast0", name+"_cast1"], [name+"_xor"]),
+        make_node("Cast", [name+"_xor"], [name], name=name, to=int(dtype_t))
+    ]
+    return nodes


 @mx_op.register("logical_not")
@@ -2300,7 +2336,18 @@ def convert_logical_not(node, **kwargs):
     """Map MXNet's logical not operator attributes to onnx's Not operator
     and return the created node.
     """
-    return create_basic_op_node('Not', node, kwargs)
+    from onnx.helper import make_node
+    from onnx import TensorProto
+    name, input_nodes, _ = get_inputs(node, kwargs)
+    input_dtypes = get_input_dtypes(node, kwargs)
+    dtype = input_dtypes[0]
+    dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
+    nodes = [
+        make_node("Cast", [input_nodes[0]], [name+"_cast"], to=int(TensorProto.BOOL)),
+        make_node("Not", [name+"_cast"], [name+"_not"]),
+        make_node("Cast", [name+"_not"], [name], name=name, to=int(dtype_t))
+    ]
+    return nodes


 @mx_op.register("size_array")
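Context for the four hunks above (an editor's note, not part of the patch): ONNX's And, Or, Xor, and Not are defined only for bool tensors, whereas MXNet's broadcast_logical_* and logical_not accept numeric inputs and return 0/1 in the input dtype. Wrapping the boolean op in a Cast-to-bool / Cast-back pair reproduces that behavior. A minimal standalone sketch of the subgraph these converters emit, assuming only the onnx package (graph and tensor names are illustrative):

```python
import onnx
from onnx import TensorProto, helper

# Cast both inputs to bool, apply And, cast back to the original dtype --
# the same shape of graph convert_broadcast_logical_and now builds.
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])
out = helper.make_tensor_value_info('out', TensorProto.FLOAT, [3])
nodes = [
    helper.make_node('Cast', ['x'], ['x_bool'], to=int(TensorProto.BOOL)),
    helper.make_node('Cast', ['y'], ['y_bool'], to=int(TensorProto.BOOL)),
    helper.make_node('And', ['x_bool', 'y_bool'], ['xy_bool']),
    helper.make_node('Cast', ['xy_bool'], ['out'], to=int(TensorProto.FLOAT)),
]
graph = helper.make_graph(nodes, 'logical_and_cast_demo', [x, y], [out])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 12)])
onnx.checker.check_model(model)  # passes: And only ever sees bool inputs
```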
""" - return create_basic_op_node('Not', node, kwargs) + from onnx.helper import make_node + from onnx import TensorProto + name, input_nodes, _ = get_inputs(node, kwargs) + input_dtypes = get_input_dtypes(node, kwargs) + dtype = input_dtypes[0] + dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype] + nodes = [ + make_node("Cast", [input_nodes[0]], [name+"_cast"], to=int(TensorProto.BOOL)), + make_node("Not", [name+"_cast"], [name+"_not"]), + make_node("Cast", [name+"_not"], [name], name=name, to=int(dtype_t)) + ] + return nodes @mx_op.register("size_array") @@ -2346,6 +2393,9 @@ def convert_norm(node, **kwargs): keepdims = get_boolean_attribute_value(attrs, "keepdims") ord = int(attrs.get("ord", 2)) + if ord not in [1, 2]: + raise AttributeError("norm export operator only supports ord=1 or ord=2.") + onnx_op_name = "ReduceL1" if ord == 1 else "ReduceL2" if axes: @@ -2396,25 +2446,26 @@ def convert_random_uniform(node, **kwargs): """Map MXNet's random_uniform operator attributes to onnx's RandomUniform operator and return the created node. """ - name, input_nodes, attrs = get_inputs(node, kwargs) + name, _, attrs = get_inputs(node, kwargs) # Converting to float32 low = float(attrs.get("low", 0)) high = float(attrs.get("high", 1.0)) shape = convert_string_to_list(attrs.get('shape', '[]')) - dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))] + dtype = np.dtype(attrs.get('dtype', 'float32')) + dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype] node = onnx.helper.make_node( 'RandomUniform', - input_nodes, + [], [name], low=low, high=high, - dtype=dtype, + dtype=dtype_t, shape=shape, name=name ) - return [node] + return [node], (dtype,) @mx_op.register("_random_normal") @@ -2428,7 +2479,8 @@ def convert_random_normal(node, **kwargs): mean = float(attrs.get("loc", 0)) scale = float(attrs.get("scale", 1.0)) shape = convert_string_to_list(attrs.get('shape', '[]')) - dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))] + dtype = np.dtype(attrs.get('dtype', 'float32')) + dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype] node = onnx.helper.make_node( 'RandomNormal', @@ -2436,11 +2488,11 @@ def convert_random_normal(node, **kwargs): [name], mean=mean, scale=scale, - dtype=dtype, + dtype=dtype_t, shape=shape, name=name ) - return [node] + return [node], (dtype,) @mx_op.register("ROIPooling") @@ -4293,15 +4345,17 @@ def convert_random_uniform_like(node, **kwargs): """ from onnx.helper import make_node name, input_nodes, attrs = get_inputs(node, kwargs) + input_dtypes = get_input_dtypes(node, kwargs) + + dtype = input_dtypes[0] + dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype] low = float(attrs.get('low', 0.)) high = float(attrs.get('high', 1.)) - dtype = attrs.get('dtype', 'float32') nodes = [ make_node('RandomUniformLike', [input_nodes[0]], [name], name=name, - dtype=onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], - low=low, high=high) + dtype=dtype_t, low=low, high=high) ] return nodes diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py index cd694a090ad4..eedfd331b522 100644 --- a/tests/python-pytest/onnx/test_operators.py +++ b/tests/python-pytest/onnx/test_operators.py @@ -1290,6 +1290,154 @@ def test_onnx_export_contrib_div_sqrt_dim(tmp_path, dtype, shape): op_export_test('contrib_div_sqrt_dim', M, [A], tmp_path) +@pytest.mark.parametrize('dtype', ['float16', 'float32']) +@pytest.mark.parametrize('shape', [(100,), (3, 4, 5), (6, 7)]) +def 
diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py
index cd694a090ad4..eedfd331b522 100644
--- a/tests/python-pytest/onnx/test_operators.py
+++ b/tests/python-pytest/onnx/test_operators.py
@@ -1290,6 +1290,154 @@ def test_onnx_export_contrib_div_sqrt_dim(tmp_path, dtype, shape):
     op_export_test('contrib_div_sqrt_dim', M, [A], tmp_path)


+@pytest.mark.parametrize('dtype', ['float16', 'float32'])
+@pytest.mark.parametrize('shape', [(100,), (3, 4, 5), (6, 7)])
+def test_onnx_export_reciprocal(tmp_path, dtype, shape):
+    A = mx.nd.random.uniform(-100, 100, shape).astype(dtype)
+    M = def_model('reciprocal')
+    op_export_test('reciprocal', M, [A], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32", "float64", "int32", "int64"])
+@pytest.mark.parametrize('shape', [(1, 3), (3, 4, 5)])
+def test_onnx_export_power(tmp_path, shape, dtype):
+    x = mx.nd.random.uniform(-5, 5, shape).astype(dtype)
+    y = mx.nd.random.uniform(-10, 10, shape).astype(dtype)
+    M = def_model('_internal._power')
+    op_export_test('_internal._power', M, [x, y], tmp_path)
+
+@pytest.mark.parametrize("dtype", ["float16", "float32", "float64", "int32", "int64"])
+@pytest.mark.parametrize('shape', [(1, 3), (3, 4, 5)])
+def test_onnx_export_broadcast_power(tmp_path, shape, dtype):
+    x = mx.nd.random.uniform(-5, 5, shape).astype(dtype)
+    y = mx.nd.random.uniform(-10, 10, shape).astype(dtype)
+    M = def_model('broadcast_power')
+    op_export_test('broadcast_power', M, [x, y], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32", "float64"])
+@pytest.mark.parametrize('shape', [(3, 4, 5), (6, 7), (8,)])
+def test_onnx_export_sqrt(tmp_path, dtype, shape):
+    A = mx.nd.random.uniform(-100, 100, shape).astype(dtype)
+    M = def_model('sqrt')
+    op_export_test('sqrt', M, [A], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32"])
+@pytest.mark.parametrize("params", [[(1,4,2,3), 1], [(1,4,2,3), 2]])
+def test_onnx_export_depth_to_space(tmp_path, dtype, params):
+    shape, block_size = params
+    M = def_model('depth_to_space', block_size=block_size)
+    x = mx.nd.arange(0, np.prod(shape)).reshape(shape).astype(dtype)
+    op_export_test('depth_to_space', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32"])
+@pytest.mark.parametrize("params", [[(1,4,2,3), 1], [(1,1,4,6), 2]])
+def test_onnx_export_space_to_depth(tmp_path, dtype, params):
+    shape, block_size = params
+    M = def_model('space_to_depth', block_size=block_size)
+    x = mx.nd.arange(0, np.prod(shape)).reshape(shape).astype(dtype)
+    op_export_test('space_to_depth', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32", "float64", "int32", "int64"])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+def test_onnx_export_square(tmp_path, dtype, shape):
+    M = def_model('square')
+    x = mx.nd.arange(0, np.prod(shape)).reshape(shape).astype(dtype)
+    op_export_test('square', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32", "float64", "int32", "int64"])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+def test_onnx_export_shape_array(tmp_path, dtype, shape):
+    M = def_model('shape_array')
+    x = mx.nd.arange(0, np.prod(shape)).reshape(shape).astype(dtype)
+    op_export_test('shape_array', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32"])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+@pytest.mark.parametrize("alpha", [None, 0.1, 0.4567, 0.9])
+@pytest.mark.parametrize("beta", [None, 0.1, 0.4567, 0.5, 0.9])
+def test_onnx_export_hard_sigmoid(tmp_path, dtype, shape, alpha, beta):
+    kwargs = {}
+    if alpha is not None:
+        kwargs['alpha'] = alpha
+    if beta is not None:
+        kwargs['beta'] = beta
+    M = def_model('hard_sigmoid', **kwargs)
+    x = mx.nd.arange(0, np.prod(shape)).reshape(shape).astype(dtype)
+    op_export_test('hard_sigmoid', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+def test_onnx_export_broadcast_lesser(tmp_path, dtype, shape):
+    M = def_model('broadcast_lesser')
+    x = mx.nd.random.uniform(-100, 100, shape).astype(dtype)
+    y = mx.nd.random.uniform(-100, 100, shape).astype(dtype)
+    op_export_test('broadcast_lesser', M, [x, y], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+def test_onnx_export_broadcast_greater(tmp_path, dtype, shape):
+    M = def_model('broadcast_greater')
+    x = mx.nd.random.uniform(-100, 100, shape).astype(dtype)
+    y = mx.nd.random.uniform(-100, 100, shape).astype(dtype)
+    op_export_test('broadcast_greater', M, [x, y], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32'])
+@pytest.mark.parametrize("shape", [(10,5), (1,2,3), (4,5,6)])
+@pytest.mark.parametrize('axis', [None, 1])
+def test_onnx_export_log_softmax(tmp_path, dtype, shape, axis):
+    x = mx.nd.random.uniform(0, 1, shape, dtype=dtype)
+    kwargs = {}
+    if axis is not None:
+        kwargs['axis'] = axis
+    M = def_model('log_softmax', **kwargs)
+    op_export_test('log_softmax', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize("shape", [(10,), (2,3), (4,5,6)])
+def test_onnx_export_broadcast_logical_and(tmp_path, dtype, shape):
+    M = def_model('broadcast_logical_and')
+    x = mx.nd.random.uniform(-1, 1, shape).astype(dtype)
+    y = mx.nd.random.uniform(-1, 1, shape).astype(dtype)
+    op_export_test('broadcast_logical_and', M, [x, y], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize("shape", [(10,), (2,3), (4,5,6)])
+def test_onnx_export_broadcast_logical_or(tmp_path, dtype, shape):
+    M = def_model('broadcast_logical_or')
+    x = mx.nd.random.uniform(-1, 1, shape).astype(dtype)
+    y = mx.nd.random.uniform(-1, 1, shape).astype(dtype)
+    op_export_test('broadcast_logical_or', M, [x, y], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize("shape", [(10,), (2,3), (4,5,6)])
+def test_onnx_export_broadcast_logical_xor(tmp_path, dtype, shape):
+    M = def_model('broadcast_logical_xor')
+    x = mx.nd.random.uniform(-1, 1, shape).astype(dtype)
+    y = mx.nd.random.uniform(-1, 1, shape).astype(dtype)
+    op_export_test('broadcast_logical_xor', M, [x, y], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize("shapes", [[(1,3),(2,3)], [(2,1,3,1),(2,8,3,9)], [(1,3,6),(5,3,6)]])
+def test_onnx_export_broadcast_to(tmp_path, dtype, shapes):
+    in_shape, to_shape = shapes
+    M = def_model('broadcast_to', shape=to_shape)
+    x = mx.nd.random.uniform(-100, 100, in_shape).astype(dtype)
+    op_export_test('broadcast_to', M, [x], tmp_path)
+
+
 # onnxruntime currently does not support int32
 @pytest.mark.parametrize('dtype', ['float16', 'float32', 'int64'])
 @pytest.mark.parametrize('shape', [(1,), (2, 3), (4, 5, 6)])
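The three broadcast_logical_* tests above deliberately feed signed, non-bool inputs, which is exactly the case the new Cast wrapping in the converter hunks handles. Elementwise, the exported graphs should agree with this numpy reference (an illustrative sketch, not part of the test suite):

```python
import numpy as np

def broadcast_logical_and_ref(x, y):
    # Non-zero is truthy; the result is 0/1 in the input dtype, matching
    # MXNet's broadcast_logical_and and the Cast-And-Cast ONNX subgraph.
    return np.logical_and(x != 0, y != 0).astype(x.dtype)

x = np.array([-1.5, 0.0, 2.0], dtype=np.float32)
y = np.array([3.0, 7.0, 0.0], dtype=np.float32)
print(broadcast_logical_and_ref(x, y))  # [1. 0. 0.]
```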
@@ -1406,4 +1554,78 @@ def test_onnx_export_ufunc(tmp_path, dtype, shape, op_name):
 def test_onnx_export_squeeze(tmp_path, dtype, shape_axis):
     x = mx.nd.random.uniform(1, 100, shape=shape_axis[0]).astype(dtype)
     M = def_model('squeeze', axis=shape_axis[1])
-    op_export_test('squeeze', M, [x], tmp_path)
\ No newline at end of file
+    op_export_test('squeeze', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'int32', 'int64'])
+@pytest.mark.parametrize("shape", [(10,), (2,3), (4,5,6)])
+def test_onnx_export_logical_not(tmp_path, dtype, shape):
+    M = def_model('logical_not')
+    x = mx.nd.random.uniform(-1, 1, shape).astype(dtype)
+    op_export_test('logical_not', M, [x], tmp_path)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32", "float64"])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+def test_onnx_export_random_uniform_like(tmp_path, dtype, shape):
+    M = def_model('random.uniform_like')
+    low = -10
+    high = 10
+    x = mx.nd.zeros(shape=shape).astype(dtype)
+    def rand_check(out):
+        for i in out:
+            if (i < low).any() or (i >= high).any():
+                raise Exception("Invalid value")
+        return np.zeros_like(out)
+    def rand_check_nd(out):
+        return rand_check(out.asnumpy())
+    op_export_test('random.uniform_like', M, [x], tmp_path, mx_map=rand_check_nd, onnx_map=rand_check)
+
+
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+def test_onnx_export_random_uniform(tmp_path, dtype, shape):
+    low = -10
+    high = 10
+    M = def_model('random_uniform', low=low, high=high, shape=shape, dtype=dtype, dummy_input=True)
+    x = mx.nd.array([1], dtype='float32')
+    def rand_check(out):
+        for i in out:
+            if (i < low).any() or (i >= high).any():
+                raise Exception("Invalid value")
+        return np.zeros_like(out)
+    def rand_check_nd(out):
+        return rand_check(out.asnumpy())
+    op_export_test('random_uniform', M, [x], tmp_path, mx_map=rand_check_nd, onnx_map=rand_check, dummy_input=True)
+
+
+@pytest.mark.parametrize("dtype", ["float32", "float64"])
+@pytest.mark.parametrize("shape", [(10,), (1,2,3), (4,5,6)])
+@pytest.mark.parametrize("loc", [None, 0, 1, 2])
+@pytest.mark.parametrize("scale", [None, 1, 2])
+def test_onnx_export_random_normal(tmp_path, dtype, loc, scale, shape):
+    kwargs = {
+        'dtype': dtype,
+        'shape': shape,
+        'dummy_input': True
+    }
+    if loc is not None:
+        kwargs['loc'] = loc
+    if scale is not None:
+        kwargs['scale'] = scale
+    M = def_model('random_normal', **kwargs)
+    x = mx.nd.array([1], dtype='float32')
+    def rand_check(out):
+        return np.zeros_like(out)
+    def rand_check_nd(out):
+        return rand_check(out.asnumpy())
+    op_export_test('random_normal', M, [x], tmp_path, mx_map=rand_check_nd, onnx_map=rand_check, dummy_input=True)
+
+
+@pytest.mark.parametrize("dtype", ["float16", "float32"])
+@pytest.mark.parametrize("spatial_scale", [0.7, 1.0])
+def test_onnx_export_roi_pooling(tmp_path, dtype, spatial_scale):
+    M = def_model('ROIPooling', pooled_size=(2,2), spatial_scale=spatial_scale)
+    x = mx.nd.arange(start=0, stop=48, dtype=dtype).reshape((1,1,8,6))
+    y = mx.nd.array([[0,0,0,4,4]], dtype=dtype)
+    op_export_test('ROIPooling', M, [x, y], tmp_path)
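Finally, a hedged end-to-end sketch of how these converters are exercised outside the test harness. The export_model call follows the mx2onnx entry point in the module this patch touches (MXNet 1.9-style API); the exact signature may differ across versions, so treat this as illustrative rather than definitive:

```python
import mxnet as mx
import numpy as np

# Build a symbol that uses one of the newly cast-wrapped ops and export it.
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
sym = mx.sym.broadcast_logical_and(a, b)

mx.onnx.export_model(sym, {},                     # no trained params needed
                     in_shapes=[(2, 3), (2, 3)],
                     in_types=[np.float32, np.float32],
                     onnx_file_path='broadcast_logical_and.onnx')
```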