This repository has been archived by the owner on Oct 13, 2021. It is now read-only.

Add support for tf.nn.depth_to_space lambda #492

Merged
9 commits merged on May 19, 2020
30 changes: 30 additions & 0 deletions keras2onnx/_builtin.py
@@ -373,6 +373,36 @@ def _calc_explicit_padding(input_size, output_shape, output_padding, kernel_shape):
    return pads


@converter_func(TYPES.DepthToSpace)
def convert_tf_depth_to_space(scope, operator, container):
    if operator.target_opset < 11:
        raise ValueError("DepthToSpace op is not supported for opset < 11")
    node = operator.raw_operator
    block_size = node.get_attr('block_size')
    oopb = OnnxOperatorBuilder(container, scope)
    if _is_nhwc(node):
        _, h, w, c = _cal_tensor_shape(node.inputs[0])
Contributor Author:

I still need some help: I have now set n = -1 so that it is fine if the batch dimension is unknown. But what if h and w are also unknown, i.e. their values are None? How should the reshapes below be done then?

Collaborator:

Good point. This line has an issue when the tensor shape is unknown (dynamic). Search our code base for the cases where _cal_tensor_shape returns None; we need to handle that: add a Shape op after node.inputs[0] to get the dynamic shape, use a Slice op to extract h and w, and concatenate them with the other dimensions to build the desired_shape. We have some examples of this in our code. (A rough sketch of the pattern follows below.)
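For reference, here is a minimal sketch of the Shape/Slice/Concat pattern described above, written against the generic onnx.helper API rather than keras2onnx's OnnxOperatorBuilder (whose exact slice/concat helpers are not shown in this diff). The tensor names, the block size, and the statically known channel count are all assumptions made for illustration:

```python
import numpy as np
from onnx import helper, numpy_helper

block_size = 2          # assumed block size
c_out = 4               # assumed: channels // block_size**2, known statically

# Constant tensors fed to Slice/Concat; in a real graph they would be added as
# initializers. Slice takes starts/ends as inputs from opset 10 on, and this
# converter already requires opset >= 11.
consts = [
    numpy_helper.from_array(np.array([1], dtype=np.int64), name='hw_start'),
    numpy_helper.from_array(np.array([3], dtype=np.int64), name='hw_end'),
    numpy_helper.from_array(np.array([-1], dtype=np.int64), name='batch_dim'),
    numpy_helper.from_array(np.array([block_size, block_size, c_out], dtype=np.int64),
                            name='tail_dims'),
]

nodes = [
    # Dynamic shape of the NHWC input: a 1-D int64 tensor [N, H, W, C].
    helper.make_node('Shape', ['x'], ['x_shape']),
    # Slice out the dynamic [H, W] part.
    helper.make_node('Slice', ['x_shape', 'hw_start', 'hw_end'], ['hw']),
    # desired_shape = [-1, H, W, block_size, block_size, c_out]
    helper.make_node('Concat', ['batch_dim', 'hw', 'tail_dims'], ['pre_shape'], axis=0),
    # Reshape with a runtime-computed shape instead of static h and w.
    helper.make_node('Reshape', ['x', 'pre_shape'], ['x_blocked']),
]
```

The single -1 stands in for the unknown batch dimension, which is why only h and w need to be fetched at runtime.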

Contributor Author (@CNugteren, May 19, 2020):

I had a look at an example and it is not trivial; the code quickly becomes unreadable because of all the extra nodes that are added. So I reconsidered using the actual DepthToSpace node instead, worked on that a bit, and made it work with two extra transposes (see the sketch below). I've also added a test case with unknown tensor sizes, and that one now passes as well :-) Could you have another look at the code? Thanks!
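Roughly, the alternative described here (wrapping ONNX's NCHW-only DepthToSpace in two transposes) looks like the following in raw ONNX terms; this is a sketch with assumed tensor names and block size, not the PR's actual builder calls:

```python
from onnx import helper

block_size = 2  # assumed block size

nodes = [
    # NHWC -> NCHW, since ONNX DepthToSpace expects NCHW input.
    helper.make_node('Transpose', ['x'], ['x_nchw'], perm=[0, 3, 1, 2]),
    # DCR mode matches tf.nn.depth_to_space's channel ordering.
    helper.make_node('DepthToSpace', ['x_nchw'], ['y_nchw'],
                     blocksize=block_size, mode='DCR'),
    # NCHW -> NHWC.
    helper.make_node('Transpose', ['y_nchw'], ['y'], perm=[0, 2, 3, 1]),
]
```

Because no static shape values are needed, this also covers the case where h and w are unknown at conversion time. (The diff below still shows the earlier reshape-based version with static h and w.)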

        n = -1
        reshaped = oopb.apply_reshape(operator.input_full_names,
                                      name=operator.full_name + '_pre_reshape',
                                      desired_shape=[n, h, w, block_size, block_size, c // (block_size ** 2)])
        transposed = oopb.apply_transpose(reshaped,
                                          name=operator.full_name + '_transpose',
                                          perm=[0, 1, 3, 2, 4, 5])
        oopb.apply_op_with_output("apply_reshape",
                                  transposed,
                                  operator.output_full_names,
                                  name=operator.full_name + '_post_reshape',
                                  desired_shape=[n, h * block_size, w * block_size, c // (block_size ** 2)])
    else:
        oopb.add_node_with_output("DepthToSpace",
                                  operator.input_full_names,
                                  operator.output_full_names,
                                  name=operator.full_name,
                                  blocksize=block_size,
                                  mode="DCR")


@converter_func(TYPES.DepthwiseConv2dNative)
def convert_tf_depthwise_conv2d(scope, operator, container):
    node = operator.raw_operator
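As a sanity check on the NHWC branch above, here is a standalone NumPy sketch (not part of the PR) showing that the reshape → transpose(perm=[0, 1, 3, 2, 4, 5]) → reshape sequence reproduces the DCR depth-to-space rearrangement that tf.nn.depth_to_space performs on NHWC input; all names and sizes are invented for illustration:

```python
import numpy as np

def depth_to_space_nhwc(x, bs):
    """Mirror of the converter's reshape/transpose/reshape sequence for NHWC input."""
    n, h, w, c = x.shape
    c_out = c // (bs * bs)
    t = x.reshape(n, h, w, bs, bs, c_out)   # split depth into (bs, bs, c_out)
    t = t.transpose(0, 1, 3, 2, 4, 5)       # interleave block indices with spatial dims
    return t.reshape(n, h * bs, w * bs, c_out)

# Reference based on the documented index mapping of tf.nn.depth_to_space (DCR order):
# out[n, h*bs + i, w*bs + j, k] == in[n, h, w, (i*bs + j)*c_out + k]
x = np.random.rand(2, 3, 5, 8).astype(np.float32)
bs = 2
c_out = 8 // (bs * bs)
ref = np.empty((2, 3 * bs, 5 * bs, c_out), dtype=x.dtype)
for i in range(bs):
    for j in range(bs):
        ref[:, i::bs, j::bs, :] = x[..., (i * bs + j) * c_out:(i * bs + j + 1) * c_out]

assert np.array_equal(depth_to_space_nhwc(x, bs), ref)
```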
1 change: 1 addition & 0 deletions keras2onnx/_consts.py
@@ -27,6 +27,7 @@ class TYPES:
    Conv1D = 'Conv1D'
    Conv2D = 'Conv2D'
    Cumsum = 'Cumsum'
    DepthToSpace = 'DepthToSpace'
    DepthwiseConv2dNative = 'DepthwiseConv2dNative'
    Div = 'Div'
    Einsum = 'Einsum'
19 changes: 19 additions & 0 deletions tests/test_layers.py
@@ -90,6 +90,25 @@ def test_keras_lambda(runner):
    assert runner('onnx_lambda', onnx_model, data, expected)


@pytest.mark.skipif(is_tensorflow_older_than('1.12.0'),
                    reason="tf.nn.depth_to_space not supported.")
@pytest.mark.skipif(get_maximum_opset_supported() < 11,
                    reason="DepthToSpace is not supported before opset 11.")
@pytest.mark.parametrize("data_format", ["NCHW", "NHWC"])
def test_keras_lambda_depth_to_space(runner, data_format):
    input_shape = [4, 6, 8]
    model = Sequential()
    model.add(Lambda(
        lambda x: tf.nn.depth_to_space(x, block_size=2, data_format=data_format),
        input_shape=input_shape
    ))

    onnx_model = keras2onnx.convert_keras(model, 'test_keras_lambda_depth_to_space')
    data = np.random.rand(3, *input_shape).astype(np.float32)
    expected = model.predict(data)
    assert runner('tf_depth_to_space', onnx_model, data, expected)


def test_tf_addn(runner):
    input1 = Input(shape=(5, 3, 4), dtype=tf.float32)
    input2 = Input(shape=(5, 3, 4), dtype=tf.float32)