Skip to content

Commit

Permalink
[microNPU] Add a legalization test for TFLite PAD (apache#13750)
Browse files Browse the repository at this point in the history
Added a legalization test for the stand-alone pad operation, which is legalized to a depthwise operation on the NPU.
  • Loading branch information
Aleksei-grovety authored and fzi-peccia committed Mar 27, 2023
1 parent b53706e commit 4c1da54
Showing 1 changed file with 100 additions and 0 deletions.
100 changes: 100 additions & 0 deletions tests/python/contrib/test_ethosu/test_legalize.py
Original file line number Diff line number Diff line change
Expand Up @@ -674,6 +674,106 @@ def verify(ext_func):
verify(mod["tvmgen_default_ethos_u_main_0"])


@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
@pytest.mark.parametrize("padding", [(0, 1, 0, 0), (1, 1, 1, 1), (1, 1, 5, 5)])
@pytest.mark.parametrize("const_value", [0, 5, 125, -5])
def test_tflite_separate_padding_legalize(ifm_shape, padding, const_value):
    """Check that a stand-alone TFLite PAD is legalized to an NPU depthwise op.

    Builds an int8-quantized TFLite model containing only a constant pad,
    partitions it with the Ethos-U pad pattern table, runs the PadRewriter
    legalization pass, and verifies the resulting depthwise operation's
    shapes, dtypes, weights, and attributes.
    """
    dtype = "int8"
    # A pad legalized to depthwise uses a 1x1 identity kernel with unit
    # strides/dilation; these feed both the OFM-shape helper and the checks.
    kernel_shape = (1, 1)
    strides = (1, 1)
    dilation = (1, 1)

    def build_tflite_model():
        """Return a quantized TFLite flatbuffer containing a single PAD op."""

        class PadModel(tf.Module):
            @tf.function
            def pad_func(self, x):
                # padding is (top, left, bottom, right); tf.pad wants
                # per-dimension [before, after] pairs in NHWC order.
                pad_spec = [
                    [0, 0],
                    [padding[0], padding[2]],
                    [padding[1], padding[3]],
                    [0, 0],
                ]
                return tf.pad(x, pad_spec, "CONSTANT", const_value)

        concrete = PadModel().pad_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        def representative_dataset():
            # Random calibration data so the converter can pick quantization
            # parameters for full-integer conversion.
            for _ in range(100):
                yield [np.random.rand(*tuple(ifm_shape)).astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        return converter.convert()

    def check_legalized(ext_func):
        """Assert the legalized function body is the expected depthwise op."""
        call = ext_func.body
        out_channels = call.attrs.ofm_channels

        # IFM keeps the original shape/dtype and its channel count matches
        # the OFM channel count (depthwise preserves channels).
        ifm_type = call.args[0].checked_type
        assert list(ifm_type.shape) == list(ifm_shape)
        assert str(ifm_type.dtype) == dtype
        assert ifm_type.shape[3] == out_channels

        # OFM shape must equal the reference computation for this padding.
        ofm_type = call.checked_type
        expected_ofm_shape = infra.compute_ofm_shape(
            ifm_shape, padding, kernel_shape, strides, dilation
        )
        assert list(ofm_type.shape) == list(expected_ofm_shape)
        assert str(ofm_type.dtype) == dtype
        assert ofm_type.shape[3] == out_channels

        # Weights are OHWI-laid-out 1x1 kernels, one per channel.
        weights_ohwi = call.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert weights_ohwi.shape[0] == out_channels
        assert weights_ohwi.shape[1] == kernel_shape[0]
        assert weights_ohwi.shape[2] == kernel_shape[1]
        assert weights_ohwi.shape[3] == 1  # only depth multiplier 1 is supported

        # scale_bias length must match the weight tensor's output channels.
        assert list(call.args[2].checked_type.shape)[0] == out_channels

        # Operator attributes round-trip the test parameters.
        assert list(call.attrs.padding) == list(padding)
        assert call.attrs.ofm_channels == out_channels
        assert list(call.attrs.strides) == list(strides)
        assert list(call.attrs.dilation) == list(dilation)

    pad_pattern_table = [
        (
            ethosu.PadParams.composite_name,
            ethosu.pad_pattern(),
            lambda pat: ethosu.PadParams(pat).is_valid(),
        )
    ]

    tflite_model = tflite.Model.Model.GetRootAsModel(build_tflite_model(), 0)
    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, pad_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.PadRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    check_legalized(mod["tvmgen_default_ethos_u_main_0"])


@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize("ifm_shape", [[1, 3, 4, 3], [1, 4, 5, 2]])
@pytest.mark.parametrize(
Expand Down

0 comments on commit 4c1da54

Please sign in to comment.