diff --git a/docker/Dockerfile.ci_gpu b/docker/Dockerfile.ci_gpu
index 7e7f92c684b42..5be18322c97b0 100644
--- a/docker/Dockerfile.ci_gpu
+++ b/docker/Dockerfile.ci_gpu
@@ -17,7 +17,7 @@
 # CI docker GPU env
 # tag: v0.60
 
-FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
+FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04
 
 COPY utils/apt-install-and-clear.sh /usr/local/bin/apt-install-and-clear
diff --git a/python/tvm/relax/frontend/nnef/nnef_ops.py b/python/tvm/relax/frontend/nnef/nnef_ops.py
index c4150c64d2115..c9b52ba0044c7 100644
--- a/python/tvm/relax/frontend/nnef/nnef_ops.py
+++ b/python/tvm/relax/frontend/nnef/nnef_ops.py
@@ -1419,7 +1419,7 @@ def pad_converter(bbuilder, data, padding, border, value, **kwargs):
         return bbuilder.emit_te(tvm.topi.nn.mirror_pad, data, pad_before, pad_after, "REFLECT")
     if border == "edge":
         raise tvm.error.OpNotImplemented(
-            "Replicate - Edge mode is currently not supperted in TVM relax"
+            "Replicate - Edge mode is currently not supported in TVM relax"
         )
 
     # constant works with normal relax.nn.pad
diff --git a/tests/python/frontend/nnef/cases/abs_2d/graph.nnef b/tests/python/frontend/nnef/cases/abs_2d/graph.nnef
deleted file mode 100644
index 1f101b10b4d6d..0000000000000
--- a/tests/python/frontend/nnef/cases/abs_2d/graph.nnef
+++ /dev/null
@@ -1,7 +0,0 @@
-version 1.0;
-
-graph G( input ) -> ( output )
-{
-    input = external(shape = [4,16]);
-    output = abs(input);
-}
diff --git a/tests/python/frontend/nnef/cases/abs_4d/graph.nnef b/tests/python/frontend/nnef/cases/abs_4d/graph.nnef
deleted file mode 100644
index b4449bae13dac..0000000000000
--- a/tests/python/frontend/nnef/cases/abs_4d/graph.nnef
+++ /dev/null
@@ -1,7 +0,0 @@
-version 1.0;
-
-graph G( input ) -> ( output )
-{
-    input = external(shape = [4,16,32,32]);
-    output = abs(input);
-}
diff --git a/tests/python/frontend/nnef/cases/acos_2d/graph.nnef b/tests/python/frontend/nnef/cases/acos_2d/graph.nnef
deleted file mode 100644
index c6551c4785064..0000000000000
--- a/tests/python/frontend/nnef/cases/acos_2d/graph.nnef
+++ /dev/null
@@ -1,7 +0,0 @@
-version 1.0;
-
-graph G( input ) -> ( output )
-{
-    input = external(shape = [4,16]);
-    output = acos(input);
-}
diff --git a/tests/python/frontend/nnef/cases/acos_4d/graph.nnef b/tests/python/frontend/nnef/cases/acos_4d/graph.nnef
deleted file mode 100644
index 0a6b58a3407f6..0000000000000
--- a/tests/python/frontend/nnef/cases/acos_4d/graph.nnef
+++ /dev/null
@@ -1,7 +0,0 @@
-version 1.0;
-
-graph G( input ) -> ( output )
-{
-    input = external(shape = [4,16,32,32]);
-    output = acos(input);
-}
diff --git a/tests/python/frontend/nnef/cases/acosh_2d/graph.nnef b/tests/python/frontend/nnef/cases/acosh_2d/graph.nnef
deleted file mode 100644
index c6551c4785064..0000000000000
--- a/tests/python/frontend/nnef/cases/acosh_2d/graph.nnef
+++ /dev/null
@@ -1,7 +0,0 @@
-version 1.0;
-
-graph G( input ) -> ( output )
-{
-    input = external(shape = [4,16]);
-    output = acos(input);
-}
diff --git a/tests/python/frontend/nnef/cases/acosh_4d/graph.nnef b/tests/python/frontend/nnef/cases/acosh_4d/graph.nnef
deleted file mode 100644
index 0a6b58a3407f6..0000000000000
--- a/tests/python/frontend/nnef/cases/acosh_4d/graph.nnef
+++ /dev/null
@@ -1,7 +0,0 @@
-version 1.0;
-
-graph G( input ) -> ( output )
-{
-    input = external(shape = [4,16,32,32]);
-    output = acos(input);
-}
diff --git a/tests/python/frontend/nnef/cases/add_2d/graph.nnef b/tests/python/frontend/nnef/cases/add_2d/graph.nnef
deleted file mode 100644
index ccb1d0dbf7f88..0000000000000
---
a/tests/python/frontend/nnef/cases/add_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = add(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/add_4d/graph.nnef b/tests/python/frontend/nnef/cases/add_4d/graph.nnef deleted file mode 100644 index 63ab32aeab90c..0000000000000 --- a/tests/python/frontend/nnef/cases/add_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = add(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/add_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/add_4d_broadcast/graph.nnef deleted file mode 100644 index cc5227e788963..0000000000000 --- a/tests/python/frontend/nnef/cases/add_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = add(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/add_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/add_4d_constant/graph.nnef deleted file mode 100644 index a490b8bac1f77..0000000000000 --- a/tests/python/frontend/nnef/cases/add_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = add(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/all_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/all_reduce_channel/graph.nnef deleted file mode 100644 index 4655d7f2d03f9..0000000000000 --- a/tests/python/frontend/nnef/cases/all_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = all_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/all_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/all_reduce_spatial/graph.nnef deleted file mode 100644 index e225df3e38b9b..0000000000000 --- a/tests/python/frontend/nnef/cases/all_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = all_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/and_2d/graph.nnef b/tests/python/frontend/nnef/cases/and_2d/graph.nnef deleted file mode 100644 index 9aab6ac9e7436..0000000000000 --- a/tests/python/frontend/nnef/cases/and_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = and(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/and_4d/graph.nnef b/tests/python/frontend/nnef/cases/and_4d/graph.nnef deleted file mode 100644 index 7692dd8689f75..0000000000000 --- a/tests/python/frontend/nnef/cases/and_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = and(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/and_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/and_4d_broadcast/graph.nnef deleted file mode 100644 index 4010bb0b9a1f9..0000000000000 --- 
a/tests/python/frontend/nnef/cases/and_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = and(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/and_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/and_4d_constant/graph.nnef deleted file mode 100644 index 35dee4bb48392..0000000000000 --- a/tests/python/frontend/nnef/cases/and_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = and(input, false); -} diff --git a/tests/python/frontend/nnef/cases/any_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/any_reduce_channel/graph.nnef deleted file mode 100644 index 40c1c62adef32..0000000000000 --- a/tests/python/frontend/nnef/cases/any_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = any_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/any_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/any_reduce_spatial/graph.nnef deleted file mode 100644 index 296877019aa81..0000000000000 --- a/tests/python/frontend/nnef/cases/any_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = any_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/area_downsample/graph.nnef b/tests/python/frontend/nnef/cases/area_downsample/graph.nnef deleted file mode 100644 index df4c5c0951e69..0000000000000 --- a/tests/python/frontend/nnef/cases/area_downsample/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = area_downsample(input, factor = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/argmax_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/argmax_reduce_channel/graph.nnef deleted file mode 100644 index 10c00d26fda9f..0000000000000 --- a/tests/python/frontend/nnef/cases/argmax_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = argmax_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/argmax_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/argmax_reduce_spatial/graph.nnef deleted file mode 100644 index 696dba65ba605..0000000000000 --- a/tests/python/frontend/nnef/cases/argmax_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = argmax_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/argmin_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/argmin_reduce_channel/graph.nnef deleted file mode 100644 index dc048a1677ed4..0000000000000 --- a/tests/python/frontend/nnef/cases/argmin_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = argmin_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/argmin_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/argmin_reduce_spatial/graph.nnef deleted file mode 100644 index 
0f532835811e5..0000000000000 --- a/tests/python/frontend/nnef/cases/argmin_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = argmin_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/asin_2d/graph.nnef b/tests/python/frontend/nnef/cases/asin_2d/graph.nnef deleted file mode 100644 index 5855e4fc41192..0000000000000 --- a/tests/python/frontend/nnef/cases/asin_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = asin(input); -} diff --git a/tests/python/frontend/nnef/cases/asin_4d/graph.nnef b/tests/python/frontend/nnef/cases/asin_4d/graph.nnef deleted file mode 100644 index 1eebea76d39f0..0000000000000 --- a/tests/python/frontend/nnef/cases/asin_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = asin(input); -} diff --git a/tests/python/frontend/nnef/cases/asinh_2d/graph.nnef b/tests/python/frontend/nnef/cases/asinh_2d/graph.nnef deleted file mode 100644 index 95571e7e63378..0000000000000 --- a/tests/python/frontend/nnef/cases/asinh_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = asinh(input); -} diff --git a/tests/python/frontend/nnef/cases/asinh_4d/graph.nnef b/tests/python/frontend/nnef/cases/asinh_4d/graph.nnef deleted file mode 100644 index f42189e2e82f3..0000000000000 --- a/tests/python/frontend/nnef/cases/asinh_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = asinh(input); -} diff --git a/tests/python/frontend/nnef/cases/atan_2d/graph.nnef b/tests/python/frontend/nnef/cases/atan_2d/graph.nnef deleted file mode 100644 index 71948f20d5f20..0000000000000 --- a/tests/python/frontend/nnef/cases/atan_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = atan(input); -} diff --git a/tests/python/frontend/nnef/cases/atan_4d/graph.nnef b/tests/python/frontend/nnef/cases/atan_4d/graph.nnef deleted file mode 100644 index 444d042c3caa5..0000000000000 --- a/tests/python/frontend/nnef/cases/atan_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = atan(input); -} diff --git a/tests/python/frontend/nnef/cases/atanh_2d/graph.nnef b/tests/python/frontend/nnef/cases/atanh_2d/graph.nnef deleted file mode 100644 index 859943ddda9e7..0000000000000 --- a/tests/python/frontend/nnef/cases/atanh_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = atanh(input); -} diff --git a/tests/python/frontend/nnef/cases/atanh_4d/graph.nnef b/tests/python/frontend/nnef/cases/atanh_4d/graph.nnef deleted file mode 100644 index b181be734e71e..0000000000000 --- a/tests/python/frontend/nnef/cases/atanh_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = atanh(input); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool1x1/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool1x1/graph.nnef deleted file mode 100644 index 295ee379cce76..0000000000000 --- 
a/tests/python/frontend/nnef/cases/avg_pool1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,1,1], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool2x2/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool2x2/graph.nnef deleted file mode 100644 index 48315774032ea..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool2x2/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool3x3/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool3x3/graph.nnef deleted file mode 100644 index 33d98645b6c4b..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool3x3/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool3x3_ignore-border/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool3x3_ignore-border/graph.nnef deleted file mode 100644 index be79e6d985623..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool3x3_ignore-border/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool3x3_pad0-0/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool3x3_pad0-0/graph.nnef deleted file mode 100644 index 0434182b0efa1..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool3x3_pad0-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool3x3_pad0-1/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool3x3_pad0-1/graph.nnef deleted file mode 100644 index e43442630cd63..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool3x3_pad0-1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool3x3_pad1-0/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool3x3_pad1-0/graph.nnef deleted file mode 100644 index 09c854997f9a5..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool3x3_pad1-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool3x3_pad1-1/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool3x3_pad1-1/graph.nnef deleted file mode 100644 index c334ba3fb807d..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool3x3_pad1-1/graph.nnef +++ /dev/null @@ 
-1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/avg_pool3x3_stride1x1/graph.nnef b/tests/python/frontend/nnef/cases/avg_pool3x3_stride1x1/graph.nnef deleted file mode 100644 index d1fbf173a7213..0000000000000 --- a/tests/python/frontend/nnef/cases/avg_pool3x3_stride1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = avg_pool(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/batch_norm/graph.nnef b/tests/python/frontend/nnef/cases/batch_norm/graph.nnef deleted file mode 100644 index 55197bf03d607..0000000000000 --- a/tests/python/frontend/nnef/cases/batch_norm/graph.nnef +++ /dev/null @@ -1,11 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - mean = variable(shape = [1,16], label = 'mean'); - variance = variable(shape = [1,16], label = 'variance'); - offset = variable(shape = [1,16], label = 'offset'); - scale = variable(shape = [1,16], label = 'scale'); - output = batch_normalization(input, mean, variance, offset, scale, epsilon = 1e-3); -} diff --git a/tests/python/frontend/nnef/cases/bilinear_upsample_aligned_constant/graph.nnef b/tests/python/frontend/nnef/cases/bilinear_upsample_aligned_constant/graph.nnef deleted file mode 100644 index 6fbc55a4b61d1..0000000000000 --- a/tests/python/frontend/nnef/cases/bilinear_upsample_aligned_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = multilinear_upsample(input, factor = [2,2], method = 'aligned', border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/bilinear_upsample_aligned_replicate/graph.nnef b/tests/python/frontend/nnef/cases/bilinear_upsample_aligned_replicate/graph.nnef deleted file mode 100644 index 5bdee4db665c0..0000000000000 --- a/tests/python/frontend/nnef/cases/bilinear_upsample_aligned_replicate/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = multilinear_upsample(input, factor = [2,2], method = 'aligned', border = 'replicate'); -} diff --git a/tests/python/frontend/nnef/cases/bilinear_upsample_asymmetric_constant/graph.nnef b/tests/python/frontend/nnef/cases/bilinear_upsample_asymmetric_constant/graph.nnef deleted file mode 100644 index e94572e7aa3ba..0000000000000 --- a/tests/python/frontend/nnef/cases/bilinear_upsample_asymmetric_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = multilinear_upsample(input, factor = [2,2], method = 'asymmetric', border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/bilinear_upsample_asymmetric_replicate/graph.nnef b/tests/python/frontend/nnef/cases/bilinear_upsample_asymmetric_replicate/graph.nnef deleted file mode 100644 index 59a0c229a5d77..0000000000000 --- a/tests/python/frontend/nnef/cases/bilinear_upsample_asymmetric_replicate/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = multilinear_upsample(input, factor = [2,2], 
method = 'asymmetric', border = 'replicate'); -} diff --git a/tests/python/frontend/nnef/cases/bilinear_upsample_symmetric_constant/graph.nnef b/tests/python/frontend/nnef/cases/bilinear_upsample_symmetric_constant/graph.nnef deleted file mode 100644 index 2e6cc716f9f72..0000000000000 --- a/tests/python/frontend/nnef/cases/bilinear_upsample_symmetric_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = multilinear_upsample(input, factor = [2,2], method = 'symmetric', border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/bilinear_upsample_symmetric_replicate/graph.nnef b/tests/python/frontend/nnef/cases/bilinear_upsample_symmetric_replicate/graph.nnef deleted file mode 100644 index a0721fe77e225..0000000000000 --- a/tests/python/frontend/nnef/cases/bilinear_upsample_symmetric_replicate/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = multilinear_upsample(input, factor = [2,2], method = 'symmetric', border = 'replicate'); -} diff --git a/tests/python/frontend/nnef/cases/box1x1/graph.nnef b/tests/python/frontend/nnef/cases/box1x1/graph.nnef deleted file mode 100644 index 2f3c0876e950f..0000000000000 --- a/tests/python/frontend/nnef/cases/box1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,1,1], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/box2x2/graph.nnef b/tests/python/frontend/nnef/cases/box2x2/graph.nnef deleted file mode 100644 index 693903905ea99..0000000000000 --- a/tests/python/frontend/nnef/cases/box2x2/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/box3x3/graph.nnef b/tests/python/frontend/nnef/cases/box3x3/graph.nnef deleted file mode 100644 index 60135ee5b37e8..0000000000000 --- a/tests/python/frontend/nnef/cases/box3x3/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/box3x3_pad0-0/graph.nnef b/tests/python/frontend/nnef/cases/box3x3_pad0-0/graph.nnef deleted file mode 100644 index baf67c5304e58..0000000000000 --- a/tests/python/frontend/nnef/cases/box3x3_pad0-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/box3x3_pad0-1/graph.nnef b/tests/python/frontend/nnef/cases/box3x3_pad0-1/graph.nnef deleted file mode 100644 index a5a86b05c09cf..0000000000000 --- a/tests/python/frontend/nnef/cases/box3x3_pad0-1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/box3x3_pad1-0/graph.nnef 
b/tests/python/frontend/nnef/cases/box3x3_pad1-0/graph.nnef deleted file mode 100644 index 485a57b456fdf..0000000000000 --- a/tests/python/frontend/nnef/cases/box3x3_pad1-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/box3x3_pad1-1/graph.nnef b/tests/python/frontend/nnef/cases/box3x3_pad1-1/graph.nnef deleted file mode 100644 index d660e46aecb8a..0000000000000 --- a/tests/python/frontend/nnef/cases/box3x3_pad1-1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/box3x3_stride1x1/graph.nnef b/tests/python/frontend/nnef/cases/box3x3_stride1x1/graph.nnef deleted file mode 100644 index dd78ea76b5bf1..0000000000000 --- a/tests/python/frontend/nnef/cases/box3x3_stride1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = box(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/ceil_2d/graph.nnef b/tests/python/frontend/nnef/cases/ceil_2d/graph.nnef deleted file mode 100644 index 1a599994c7eb7..0000000000000 --- a/tests/python/frontend/nnef/cases/ceil_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = ceil(input); -} diff --git a/tests/python/frontend/nnef/cases/ceil_4d/graph.nnef b/tests/python/frontend/nnef/cases/ceil_4d/graph.nnef deleted file mode 100644 index 07cec8f89947e..0000000000000 --- a/tests/python/frontend/nnef/cases/ceil_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = ceil(input); -} diff --git a/tests/python/frontend/nnef/cases/clamp_2d/graph.nnef b/tests/python/frontend/nnef/cases/clamp_2d/graph.nnef deleted file mode 100644 index 7f6747ba4d4e0..0000000000000 --- a/tests/python/frontend/nnef/cases/clamp_2d/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input1, input2, input3 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - input3 = external(shape = [4,16]); - output = clamp(input1, input2, input3); -} diff --git a/tests/python/frontend/nnef/cases/clamp_4d/graph.nnef b/tests/python/frontend/nnef/cases/clamp_4d/graph.nnef deleted file mode 100644 index 0fbf546b1cfd3..0000000000000 --- a/tests/python/frontend/nnef/cases/clamp_4d/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input1, input2, input3 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - input3 = external(shape = [4,16,32,32]); - output = clamp(input1, input2, input3); -} diff --git a/tests/python/frontend/nnef/cases/clamp_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/clamp_4d_constant/graph.nnef deleted file mode 100644 index ea73414bcb143..0000000000000 --- a/tests/python/frontend/nnef/cases/clamp_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape 
= [4,16,32,32]); - output = clamp(input, 0.25, 0.75); -} diff --git a/tests/python/frontend/nnef/cases/concat_channel/graph.nnef b/tests/python/frontend/nnef/cases/concat_channel/graph.nnef deleted file mode 100644 index 211f1366c2e97..0000000000000 --- a/tests/python/frontend/nnef/cases/concat_channel/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = concat([input1, input2], axis = 1); -} diff --git a/tests/python/frontend/nnef/cases/conv1x1/graph.nnef b/tests/python/frontend/nnef/cases/conv1x1/graph.nnef deleted file mode 100644 index 75d5ca91ac239..0000000000000 --- a/tests/python/frontend/nnef/cases/conv1x1/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,1,1], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/conv2x2/graph.nnef b/tests/python/frontend/nnef/cases/conv2x2/graph.nnef deleted file mode 100644 index e47b8fb89772f..0000000000000 --- a/tests/python/frontend/nnef/cases/conv2x2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,2,2], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3/graph.nnef deleted file mode 100644 index 687f05187fa81..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_groups0/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_groups0/graph.nnef deleted file mode 100644 index 5f169f2409251..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_groups0/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,1,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, groups = 0); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_nobias/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_nobias/graph.nnef deleted file mode 100644 index 396692d4681f4..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_nobias/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - output = conv(input, filter, 0.0); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_pad0-0/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_pad0-0/graph.nnef deleted file mode 100644 index 7365760bacd08..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_pad0-0/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = 
variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, padding = [(0,0), (0,0)]); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_pad0-1/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_pad0-1/graph.nnef deleted file mode 100644 index 228fee93cb9fe..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_pad0-1/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, padding = [(0,1), (0,1)]); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_pad1-0/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_pad1-0/graph.nnef deleted file mode 100644 index f28b4b4a2a8a1..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_pad1-0/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, padding = [(1,0), (1,0)]); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_pad1-1/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_pad1-1/graph.nnef deleted file mode 100644 index 4948bf3794492..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_pad1-1/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, padding = [(1,1), (1,1)]); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_stride2x2/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_stride2x2/graph.nnef deleted file mode 100644 index 5f4df908f3307..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_stride2x2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, stride = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/conv3x3_valid/graph.nnef b/tests/python/frontend/nnef/cases/conv3x3_valid/graph.nnef deleted file mode 100644 index 7365760bacd08..0000000000000 --- a/tests/python/frontend/nnef/cases/conv3x3_valid/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, padding = [(0,0), (0,0)]); -} diff --git a/tests/python/frontend/nnef/cases/conv4x4/graph.nnef b/tests/python/frontend/nnef/cases/conv4x4/graph.nnef deleted file mode 100644 index ee6de3aa535e8..0000000000000 --- a/tests/python/frontend/nnef/cases/conv4x4/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,4,4], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/conv4x4_stride2x2/graph.nnef b/tests/python/frontend/nnef/cases/conv4x4_stride2x2/graph.nnef deleted file 
mode 100644 index 5a86b6850dd47..0000000000000 --- a/tests/python/frontend/nnef/cases/conv4x4_stride2x2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,4,4], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, stride = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/conv5x5/graph.nnef b/tests/python/frontend/nnef/cases/conv5x5/graph.nnef deleted file mode 100644 index bda7b9e120fa0..0000000000000 --- a/tests/python/frontend/nnef/cases/conv5x5/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,5,5], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/conv5x5_pad2-2/graph.nnef b/tests/python/frontend/nnef/cases/conv5x5_pad2-2/graph.nnef deleted file mode 100644 index 7d121ff5e1264..0000000000000 --- a/tests/python/frontend/nnef/cases/conv5x5_pad2-2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,5,5], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, padding = [(2,2), (2,2)]); -} diff --git a/tests/python/frontend/nnef/cases/conv5x5_stride3x3/graph.nnef b/tests/python/frontend/nnef/cases/conv5x5_stride3x3/graph.nnef deleted file mode 100644 index bac1ff164ac4a..0000000000000 --- a/tests/python/frontend/nnef/cases/conv5x5_stride3x3/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,5,5], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias, stride = [3,3]); -} diff --git a/tests/python/frontend/nnef/cases/conv6x6/graph.nnef b/tests/python/frontend/nnef/cases/conv6x6/graph.nnef deleted file mode 100644 index 157d2b73e9eeb..0000000000000 --- a/tests/python/frontend/nnef/cases/conv6x6/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,6,6], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/conv7x7/graph.nnef b/tests/python/frontend/nnef/cases/conv7x7/graph.nnef deleted file mode 100644 index 92e3cdac4404b..0000000000000 --- a/tests/python/frontend/nnef/cases/conv7x7/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,7,7], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = conv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/conv7x7_stride4x4/graph.nnef b/tests/python/frontend/nnef/cases/conv7x7_stride4x4/graph.nnef deleted file mode 100644 index e7c9a49cda335..0000000000000 --- a/tests/python/frontend/nnef/cases/conv7x7_stride4x4/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - filter = variable(shape = [16,8,7,7], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output 
= conv(input, filter, bias, stride = [4,4]); -} diff --git a/tests/python/frontend/nnef/cases/copy_2d/graph.nnef b/tests/python/frontend/nnef/cases/copy_2d/graph.nnef deleted file mode 100644 index cdc5613513b49..0000000000000 --- a/tests/python/frontend/nnef/cases/copy_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = copy(input); -} diff --git a/tests/python/frontend/nnef/cases/copy_4d/graph.nnef b/tests/python/frontend/nnef/cases/copy_4d/graph.nnef deleted file mode 100644 index c80293c10b765..0000000000000 --- a/tests/python/frontend/nnef/cases/copy_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = copy(input); -} diff --git a/tests/python/frontend/nnef/cases/cos_2d/graph.nnef b/tests/python/frontend/nnef/cases/cos_2d/graph.nnef deleted file mode 100644 index d82b2731c8221..0000000000000 --- a/tests/python/frontend/nnef/cases/cos_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = cos(input); -} diff --git a/tests/python/frontend/nnef/cases/cos_4d/graph.nnef b/tests/python/frontend/nnef/cases/cos_4d/graph.nnef deleted file mode 100644 index 6e4264735a329..0000000000000 --- a/tests/python/frontend/nnef/cases/cos_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = cos(input); -} diff --git a/tests/python/frontend/nnef/cases/cosh_2d/graph.nnef b/tests/python/frontend/nnef/cases/cosh_2d/graph.nnef deleted file mode 100644 index 538b3daab3206..0000000000000 --- a/tests/python/frontend/nnef/cases/cosh_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = cosh(input); -} diff --git a/tests/python/frontend/nnef/cases/cosh_4d/graph.nnef b/tests/python/frontend/nnef/cases/cosh_4d/graph.nnef deleted file mode 100644 index 76c83f2e2d74b..0000000000000 --- a/tests/python/frontend/nnef/cases/cosh_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = cosh(input); -} diff --git a/tests/python/frontend/nnef/cases/debox1x1/graph.nnef b/tests/python/frontend/nnef/cases/debox1x1/graph.nnef deleted file mode 100644 index cf6b31a87e58d..0000000000000 --- a/tests/python/frontend/nnef/cases/debox1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,1,1], stride = [1,1,2,2], padding = [(0,0),(0,0),(0,-1),(0,-1)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/debox2x2/graph.nnef b/tests/python/frontend/nnef/cases/debox2x2/graph.nnef deleted file mode 100644 index 75ae129d5cc9b..0000000000000 --- a/tests/python/frontend/nnef/cases/debox2x2/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/debox3x3/graph.nnef b/tests/python/frontend/nnef/cases/debox3x3/graph.nnef deleted file mode 100644 index 02f1a26532f08..0000000000000 --- a/tests/python/frontend/nnef/cases/debox3x3/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ 
-version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/debox3x3_pad0-0/graph.nnef b/tests/python/frontend/nnef/cases/debox3x3_pad0-0/graph.nnef deleted file mode 100644 index ac127aa0bd259..0000000000000 --- a/tests/python/frontend/nnef/cases/debox3x3_pad0-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/debox3x3_pad0-1/graph.nnef b/tests/python/frontend/nnef/cases/debox3x3_pad0-1/graph.nnef deleted file mode 100644 index 3982739aa2084..0000000000000 --- a/tests/python/frontend/nnef/cases/debox3x3_pad0-1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/debox3x3_pad1-0/graph.nnef b/tests/python/frontend/nnef/cases/debox3x3_pad1-0/graph.nnef deleted file mode 100644 index 12eb3815c833a..0000000000000 --- a/tests/python/frontend/nnef/cases/debox3x3_pad1-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/debox3x3_pad1-1/graph.nnef b/tests/python/frontend/nnef/cases/debox3x3_pad1-1/graph.nnef deleted file mode 100644 index 6195f3ae620c1..0000000000000 --- a/tests/python/frontend/nnef/cases/debox3x3_pad1-1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/debox3x3_stride1x1/graph.nnef b/tests/python/frontend/nnef/cases/debox3x3_stride1x1/graph.nnef deleted file mode 100644 index 9424ce312b263..0000000000000 --- a/tests/python/frontend/nnef/cases/debox3x3_stride1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = debox(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/deconv1x1/graph.nnef b/tests/python/frontend/nnef/cases/deconv1x1/graph.nnef deleted file mode 100644 index e49e379424759..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv1x1/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,1,1], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/deconv2x2/graph.nnef b/tests/python/frontend/nnef/cases/deconv2x2/graph.nnef deleted file mode 100644 index 1039bfe5aacac..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv2x2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = 
external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,2,2], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3/graph.nnef deleted file mode 100644 index c4900e0c81257..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_groups0/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3_groups0/graph.nnef deleted file mode 100644 index d817b0e8d8cf0..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_groups0/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,1,3,3], label = 'filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = deconv(input, filter, bias, groups = 0); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_nobias/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3_nobias/graph.nnef deleted file mode 100644 index dbb9c056fbe14..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_nobias/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - output = deconv(input, filter, 0.0); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_pad0-0/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3_pad0-0/graph.nnef deleted file mode 100644 index 9623b24e10fc6..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_pad0-0/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, padding = [(0,0), (0,0)]); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_pad0-1/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3_pad0-1/graph.nnef deleted file mode 100644 index 1c95c94ed91d7..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_pad0-1/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, padding = [(0,1), (0,1)]); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_pad1-0/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3_pad1-0/graph.nnef deleted file mode 100644 index 395e8436ba0bf..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_pad1-0/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, padding = [(1,0), (1,0)]); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_pad1-1/graph.nnef 
b/tests/python/frontend/nnef/cases/deconv3x3_pad1-1/graph.nnef deleted file mode 100644 index 97d4dfebae2c1..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_pad1-1/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, padding = [(1,1), (1,1)]); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_stride2x2/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3_stride2x2/graph.nnef deleted file mode 100644 index ee2eb0ae206ca..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_stride2x2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, stride = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/deconv3x3_valid/graph.nnef b/tests/python/frontend/nnef/cases/deconv3x3_valid/graph.nnef deleted file mode 100644 index 9623b24e10fc6..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv3x3_valid/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,3,3], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, padding = [(0,0), (0,0)]); -} diff --git a/tests/python/frontend/nnef/cases/deconv4x4/graph.nnef b/tests/python/frontend/nnef/cases/deconv4x4/graph.nnef deleted file mode 100644 index 04eb81101fe27..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv4x4/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,4,4], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/deconv4x4_stride2x2/graph.nnef b/tests/python/frontend/nnef/cases/deconv4x4_stride2x2/graph.nnef deleted file mode 100644 index a5d1b28e3b817..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv4x4_stride2x2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,4,4], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, stride = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/deconv5x5/graph.nnef b/tests/python/frontend/nnef/cases/deconv5x5/graph.nnef deleted file mode 100644 index d928d9f53400d..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv5x5/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,5,5], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/deconv5x5_pad2-2/graph.nnef b/tests/python/frontend/nnef/cases/deconv5x5_pad2-2/graph.nnef deleted file mode 100644 index 5713e9dd7d349..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv5x5_pad2-2/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( 
output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,5,5], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, padding = [(2,2), (2,2)]); -} diff --git a/tests/python/frontend/nnef/cases/deconv5x5_stride3x3/graph.nnef b/tests/python/frontend/nnef/cases/deconv5x5_stride3x3/graph.nnef deleted file mode 100644 index 8eb31d814deab..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv5x5_stride3x3/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,5,5], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, stride = [3,3]); -} diff --git a/tests/python/frontend/nnef/cases/deconv6x6/graph.nnef b/tests/python/frontend/nnef/cases/deconv6x6/graph.nnef deleted file mode 100644 index 6f5fd0a012cac..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv6x6/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,6,6], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/deconv7x7/graph.nnef b/tests/python/frontend/nnef/cases/deconv7x7/graph.nnef deleted file mode 100644 index 1e637b499c221..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv7x7/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,7,7], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/deconv7x7_stride4x4/graph.nnef b/tests/python/frontend/nnef/cases/deconv7x7_stride4x4/graph.nnef deleted file mode 100644 index c9974fa8d8147..0000000000000 --- a/tests/python/frontend/nnef/cases/deconv7x7_stride4x4/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = variable(shape = [16,8,7,7], label = 'filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = deconv(input, filter, bias, stride = [4,4]); -} diff --git a/tests/python/frontend/nnef/cases/div_2d/graph.nnef b/tests/python/frontend/nnef/cases/div_2d/graph.nnef deleted file mode 100644 index c24464c92548e..0000000000000 --- a/tests/python/frontend/nnef/cases/div_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = div(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/div_4d/graph.nnef b/tests/python/frontend/nnef/cases/div_4d/graph.nnef deleted file mode 100644 index b173873e6636c..0000000000000 --- a/tests/python/frontend/nnef/cases/div_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = div(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/div_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/div_4d_broadcast/graph.nnef deleted file mode 100644 index 78a3f4cc86b75..0000000000000 --- a/tests/python/frontend/nnef/cases/div_4d_broadcast/graph.nnef +++ /dev/null @@ 
-1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = div(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/div_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/div_4d_constant/graph.nnef deleted file mode 100644 index 4391fe6dc89f3..0000000000000 --- a/tests/python/frontend/nnef/cases/div_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = div(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/elu/graph.nnef b/tests/python/frontend/nnef/cases/elu/graph.nnef deleted file mode 100644 index 358b376395299..0000000000000 --- a/tests/python/frontend/nnef/cases/elu/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = elu(conv); -} diff --git a/tests/python/frontend/nnef/cases/elu_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/elu_2d_standalone/graph.nnef deleted file mode 100644 index 96b500d63b02a..0000000000000 --- a/tests/python/frontend/nnef/cases/elu_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16]); - output = elu(input); -} diff --git a/tests/python/frontend/nnef/cases/elu_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/elu_4d_standalone/graph.nnef deleted file mode 100644 index 512bd6b651c61..0000000000000 --- a/tests/python/frontend/nnef/cases/elu_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - output = elu(input); -} diff --git a/tests/python/frontend/nnef/cases/eq_2d/graph.nnef b/tests/python/frontend/nnef/cases/eq_2d/graph.nnef deleted file mode 100644 index 2869ff5d97bc1..0000000000000 --- a/tests/python/frontend/nnef/cases/eq_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = eq(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/eq_4d/graph.nnef b/tests/python/frontend/nnef/cases/eq_4d/graph.nnef deleted file mode 100644 index 318154c8e9ff0..0000000000000 --- a/tests/python/frontend/nnef/cases/eq_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = eq(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/eq_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/eq_4d_broadcast/graph.nnef deleted file mode 100644 index b944355b7adde..0000000000000 --- a/tests/python/frontend/nnef/cases/eq_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = eq(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/eq_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/eq_4d_constant/graph.nnef deleted file mode 100644 index e173af679c3af..0000000000000 --- 
a/tests/python/frontend/nnef/cases/eq_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = eq(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/exp_2d/graph.nnef b/tests/python/frontend/nnef/cases/exp_2d/graph.nnef deleted file mode 100644 index 0cc1698c90309..0000000000000 --- a/tests/python/frontend/nnef/cases/exp_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = exp(input); -} diff --git a/tests/python/frontend/nnef/cases/exp_4d/graph.nnef b/tests/python/frontend/nnef/cases/exp_4d/graph.nnef deleted file mode 100644 index f312ca4506fa5..0000000000000 --- a/tests/python/frontend/nnef/cases/exp_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = exp(input); -} diff --git a/tests/python/frontend/nnef/cases/floor_2d/graph.nnef b/tests/python/frontend/nnef/cases/floor_2d/graph.nnef deleted file mode 100644 index bc88b588ad582..0000000000000 --- a/tests/python/frontend/nnef/cases/floor_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = floor(input); -} diff --git a/tests/python/frontend/nnef/cases/floor_4d/graph.nnef b/tests/python/frontend/nnef/cases/floor_4d/graph.nnef deleted file mode 100644 index 00815209ef4ab..0000000000000 --- a/tests/python/frontend/nnef/cases/floor_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = floor(input); -} diff --git a/tests/python/frontend/nnef/cases/ge_2d/graph.nnef b/tests/python/frontend/nnef/cases/ge_2d/graph.nnef deleted file mode 100644 index b6abf50776f68..0000000000000 --- a/tests/python/frontend/nnef/cases/ge_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = ge(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/ge_4d/graph.nnef b/tests/python/frontend/nnef/cases/ge_4d/graph.nnef deleted file mode 100644 index dadab33cbcafa..0000000000000 --- a/tests/python/frontend/nnef/cases/ge_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = ge(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/ge_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/ge_4d_broadcast/graph.nnef deleted file mode 100644 index b1af292190534..0000000000000 --- a/tests/python/frontend/nnef/cases/ge_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = ge(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/ge_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/ge_4d_constant/graph.nnef deleted file mode 100644 index 6d779025c6077..0000000000000 --- a/tests/python/frontend/nnef/cases/ge_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = ge(input, 0.5); -} diff --git 
a/tests/python/frontend/nnef/cases/gelu/graph.nnef b/tests/python/frontend/nnef/cases/gelu/graph.nnef deleted file mode 100644 index 3fdfce946d91a..0000000000000 --- a/tests/python/frontend/nnef/cases/gelu/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = gelu(conv); -} diff --git a/tests/python/frontend/nnef/cases/gelu_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/gelu_2d_standalone/graph.nnef deleted file mode 100644 index c903678fa9f59..0000000000000 --- a/tests/python/frontend/nnef/cases/gelu_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16]); - output = gelu(input); -} diff --git a/tests/python/frontend/nnef/cases/gelu_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/gelu_4d_standalone/graph.nnef deleted file mode 100644 index 7180e1a8de53c..0000000000000 --- a/tests/python/frontend/nnef/cases/gelu_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - output = gelu(input); -} diff --git a/tests/python/frontend/nnef/cases/gt_2d/graph.nnef b/tests/python/frontend/nnef/cases/gt_2d/graph.nnef deleted file mode 100644 index 48bc77a5bdcce..0000000000000 --- a/tests/python/frontend/nnef/cases/gt_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = gt(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/gt_4d/graph.nnef b/tests/python/frontend/nnef/cases/gt_4d/graph.nnef deleted file mode 100644 index e3d392a6560d7..0000000000000 --- a/tests/python/frontend/nnef/cases/gt_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = gt(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/gt_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/gt_4d_broadcast/graph.nnef deleted file mode 100644 index bf2cf2b3ede29..0000000000000 --- a/tests/python/frontend/nnef/cases/gt_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = gt(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/gt_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/gt_4d_constant/graph.nnef deleted file mode 100644 index 252a483af67e4..0000000000000 --- a/tests/python/frontend/nnef/cases/gt_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = gt(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/l1_normalization/graph.nnef b/tests/python/frontend/nnef/cases/l1_normalization/graph.nnef deleted file mode 100644 index 0833aa1e2a134..0000000000000 --- a/tests/python/frontend/nnef/cases/l1_normalization/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = l1_normalization(input, axes = [1], bias = 1.0, 
epsilon = 1e-5); -} diff --git a/tests/python/frontend/nnef/cases/l2_normalization/graph.nnef b/tests/python/frontend/nnef/cases/l2_normalization/graph.nnef deleted file mode 100644 index 3bb94b6f52f2f..0000000000000 --- a/tests/python/frontend/nnef/cases/l2_normalization/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = l2_normalization(input, axes = [1], epsilon = 1e-3); -} diff --git a/tests/python/frontend/nnef/cases/le_2d/graph.nnef b/tests/python/frontend/nnef/cases/le_2d/graph.nnef deleted file mode 100644 index b89c3f19a726a..0000000000000 --- a/tests/python/frontend/nnef/cases/le_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = le(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/le_4d/graph.nnef b/tests/python/frontend/nnef/cases/le_4d/graph.nnef deleted file mode 100644 index 17f821d93d8c8..0000000000000 --- a/tests/python/frontend/nnef/cases/le_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = le(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/le_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/le_4d_broadcast/graph.nnef deleted file mode 100644 index e7df0cf2aab7b..0000000000000 --- a/tests/python/frontend/nnef/cases/le_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = le(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/le_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/le_4d_constant/graph.nnef deleted file mode 100644 index 328a17aab564c..0000000000000 --- a/tests/python/frontend/nnef/cases/le_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = le(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/leaky_relu/graph.nnef b/tests/python/frontend/nnef/cases/leaky_relu/graph.nnef deleted file mode 100644 index 43a829232d4c0..0000000000000 --- a/tests/python/frontend/nnef/cases/leaky_relu/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = leaky_relu(conv, alpha = 0.5); -} diff --git a/tests/python/frontend/nnef/cases/leaky_relu_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/leaky_relu_2d_standalone/graph.nnef deleted file mode 100644 index 24b239e58ddc4..0000000000000 --- a/tests/python/frontend/nnef/cases/leaky_relu_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16]); - output = leaky_relu(input, alpha = 0.5); -} diff --git a/tests/python/frontend/nnef/cases/leaky_relu_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/leaky_relu_4d_standalone/graph.nnef deleted file mode 100644 index 04e6c4dadaef3..0000000000000 --- a/tests/python/frontend/nnef/cases/leaky_relu_4d_standalone/graph.nnef +++ /dev/null @@ 
-1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - output = leaky_relu(input, alpha = 0.5); -} diff --git a/tests/python/frontend/nnef/cases/linear/graph.nnef b/tests/python/frontend/nnef/cases/linear/graph.nnef deleted file mode 100644 index 0cbef1067c1ba..0000000000000 --- a/tests/python/frontend/nnef/cases/linear/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - weights = variable(shape = [32,16], label = 'weights'); - bias = variable(shape = [1,32], label = 'bias'); - output = linear(input, weights, bias); -} diff --git a/tests/python/frontend/nnef/cases/linear_nobias/graph.nnef b/tests/python/frontend/nnef/cases/linear_nobias/graph.nnef deleted file mode 100644 index 9a93ea8d21777..0000000000000 --- a/tests/python/frontend/nnef/cases/linear_nobias/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - weights = variable(shape = [32,16], label = 'weights'); - output = linear(input, weights, 0.0); -} diff --git a/tests/python/frontend/nnef/cases/linear_reshape/graph.nnef b/tests/python/frontend/nnef/cases/linear_reshape/graph.nnef deleted file mode 100644 index d17413a265ea8..0000000000000 --- a/tests/python/frontend/nnef/cases/linear_reshape/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,8,8]); - weights = variable(shape = [32,1024], label = 'weights'); - bias = variable(shape = [1,32], label = 'bias'); - flattened = reshape(input, shape = [0,-1]); - output = linear(flattened, weights, bias); -} diff --git a/tests/python/frontend/nnef/cases/linear_squeeze/graph.nnef b/tests/python/frontend/nnef/cases/linear_squeeze/graph.nnef deleted file mode 100644 index 2971aa570d107..0000000000000 --- a/tests/python/frontend/nnef/cases/linear_squeeze/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,1,1]); - weights = variable(shape = [32,16], label = 'weights'); - bias = variable(shape = [1,32], label = 'bias'); - squeezed = squeeze(input, axes = [2,3]); - output = linear(squeezed, weights, bias); -} diff --git a/tests/python/frontend/nnef/cases/local_contrast_normalization/graph.nnef b/tests/python/frontend/nnef/cases/local_contrast_normalization/graph.nnef deleted file mode 100644 index ac9434e87689e..0000000000000 --- a/tests/python/frontend/nnef/cases/local_contrast_normalization/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = local_contrast_normalization(input, size = [1, 1, 3, 3], bias = 1.0, epsilon = 1e-5); -} diff --git a/tests/python/frontend/nnef/cases/local_mean_normalization/graph.nnef b/tests/python/frontend/nnef/cases/local_mean_normalization/graph.nnef deleted file mode 100644 index 2aa3a8b7d529d..0000000000000 --- a/tests/python/frontend/nnef/cases/local_mean_normalization/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = local_mean_normalization(input, size = [1, 1, 3, 3]); -} diff --git a/tests/python/frontend/nnef/cases/local_response_normalization/graph.nnef b/tests/python/frontend/nnef/cases/local_response_normalization/graph.nnef deleted file mode 100644 index b450cc8cea900..0000000000000 --- 
a/tests/python/frontend/nnef/cases/local_response_normalization/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = local_response_normalization(input, alpha = 1e-05, beta = 0.75, bias = 1.0, size = [1, 5, 1, 1]); -} diff --git a/tests/python/frontend/nnef/cases/local_variance_normalization/graph.nnef b/tests/python/frontend/nnef/cases/local_variance_normalization/graph.nnef deleted file mode 100644 index 83b0c6ebfff16..0000000000000 --- a/tests/python/frontend/nnef/cases/local_variance_normalization/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = local_variance_normalization(input, size = [1, 1, 3, 3], bias = 1.0, epsilon = 1e-5); -} diff --git a/tests/python/frontend/nnef/cases/log2_2d/graph.nnef b/tests/python/frontend/nnef/cases/log2_2d/graph.nnef deleted file mode 100644 index 166e05ed6a170..0000000000000 --- a/tests/python/frontend/nnef/cases/log2_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = log2(input); -} diff --git a/tests/python/frontend/nnef/cases/log2_4d/graph.nnef b/tests/python/frontend/nnef/cases/log2_4d/graph.nnef deleted file mode 100644 index 95b71212ce00d..0000000000000 --- a/tests/python/frontend/nnef/cases/log2_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = log2(input); -} diff --git a/tests/python/frontend/nnef/cases/log_2d/graph.nnef b/tests/python/frontend/nnef/cases/log_2d/graph.nnef deleted file mode 100644 index 337102ab8e789..0000000000000 --- a/tests/python/frontend/nnef/cases/log_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = log(input); -} diff --git a/tests/python/frontend/nnef/cases/log_4d/graph.nnef b/tests/python/frontend/nnef/cases/log_4d/graph.nnef deleted file mode 100644 index 36975b9bd94ff..0000000000000 --- a/tests/python/frontend/nnef/cases/log_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = log(input); -} diff --git a/tests/python/frontend/nnef/cases/lt_2d/graph.nnef b/tests/python/frontend/nnef/cases/lt_2d/graph.nnef deleted file mode 100644 index 7ef77d6be0a85..0000000000000 --- a/tests/python/frontend/nnef/cases/lt_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = lt(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/lt_4d/graph.nnef b/tests/python/frontend/nnef/cases/lt_4d/graph.nnef deleted file mode 100644 index 6cdb2285dd14a..0000000000000 --- a/tests/python/frontend/nnef/cases/lt_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = lt(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/lt_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/lt_4d_broadcast/graph.nnef deleted file mode 100644 index 7fb5764ec4b32..0000000000000 --- a/tests/python/frontend/nnef/cases/lt_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( 
input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = lt(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/lt_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/lt_4d_constant/graph.nnef deleted file mode 100644 index a4dce93a6ccb3..0000000000000 --- a/tests/python/frontend/nnef/cases/lt_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = lt(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/matmul_2d/graph.nnef b/tests/python/frontend/nnef/cases/matmul_2d/graph.nnef deleted file mode 100644 index 8586028c3deb0..0000000000000 --- a/tests/python/frontend/nnef/cases/matmul_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [16,4]); - output = matmul(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/matmul_2d_transpose/graph.nnef b/tests/python/frontend/nnef/cases/matmul_2d_transpose/graph.nnef deleted file mode 100644 index 4cb78911ea2d4..0000000000000 --- a/tests/python/frontend/nnef/cases/matmul_2d_transpose/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = matmul(input1, input2, transposeA = true, transposeB = false); -} diff --git a/tests/python/frontend/nnef/cases/matmul_4d/graph.nnef b/tests/python/frontend/nnef/cases/matmul_4d/graph.nnef deleted file mode 100644 index 5e32634583685..0000000000000 --- a/tests/python/frontend/nnef/cases/matmul_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = matmul(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/matmul_4d_transpose/graph.nnef b/tests/python/frontend/nnef/cases/matmul_4d_transpose/graph.nnef deleted file mode 100644 index 1b24655bf3440..0000000000000 --- a/tests/python/frontend/nnef/cases/matmul_4d_transpose/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = matmul(input1, input2, transposeA = true, transposeB = false); -} diff --git a/tests/python/frontend/nnef/cases/max_2d/graph.nnef b/tests/python/frontend/nnef/cases/max_2d/graph.nnef deleted file mode 100644 index ae302f3ae7350..0000000000000 --- a/tests/python/frontend/nnef/cases/max_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = max(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/max_4d/graph.nnef b/tests/python/frontend/nnef/cases/max_4d/graph.nnef deleted file mode 100644 index dc560b1a7020f..0000000000000 --- a/tests/python/frontend/nnef/cases/max_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = max(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/max_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/max_4d_broadcast/graph.nnef deleted file mode 100644 index 
fe7d4ce862c07..0000000000000 --- a/tests/python/frontend/nnef/cases/max_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = max(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/max_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/max_4d_constant/graph.nnef deleted file mode 100644 index c1b61662daefd..0000000000000 --- a/tests/python/frontend/nnef/cases/max_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/max_pool1x1/graph.nnef b/tests/python/frontend/nnef/cases/max_pool1x1/graph.nnef deleted file mode 100644 index 1c74044e88908..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,1,1], stride = [1,1,2,2], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool2x2/graph.nnef b/tests/python/frontend/nnef/cases/max_pool2x2/graph.nnef deleted file mode 100644 index 9df88946ab344..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool2x2/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool3x3/graph.nnef b/tests/python/frontend/nnef/cases/max_pool3x3/graph.nnef deleted file mode 100644 index 2413faa521d6d..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool3x3/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool3x3_constant-border/graph.nnef b/tests/python/frontend/nnef/cases/max_pool3x3_constant-border/graph.nnef deleted file mode 100644 index b0221c3c7d333..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool3x3_constant-border/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool3x3_pad0-0/graph.nnef b/tests/python/frontend/nnef/cases/max_pool3x3_pad0-0/graph.nnef deleted file mode 100644 index b59b0f166df05..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool3x3_pad0-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool3x3_pad0-1/graph.nnef b/tests/python/frontend/nnef/cases/max_pool3x3_pad0-1/graph.nnef deleted file mode 100644 index efcbfb9241622..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool3x3_pad0-1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, 
size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool3x3_pad1-0/graph.nnef b/tests/python/frontend/nnef/cases/max_pool3x3_pad1-0/graph.nnef deleted file mode 100644 index ccb0db8245a5c..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool3x3_pad1-0/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool3x3_pad1-1/graph.nnef b/tests/python/frontend/nnef/cases/max_pool3x3_pad1-1/graph.nnef deleted file mode 100644 index 189d708d27699..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool3x3_pad1-1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_pool3x3_stride1x1/graph.nnef b/tests/python/frontend/nnef/cases/max_pool3x3_stride1x1/graph.nnef deleted file mode 100644 index 26513627ef81e..0000000000000 --- a/tests/python/frontend/nnef/cases/max_pool3x3_stride1x1/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_pool(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'ignore'); -} diff --git a/tests/python/frontend/nnef/cases/max_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/max_reduce_channel/graph.nnef deleted file mode 100644 index 38f5bfd2e342b..0000000000000 --- a/tests/python/frontend/nnef/cases/max_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/max_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/max_reduce_spatial/graph.nnef deleted file mode 100644 index 18d0bf319b67f..0000000000000 --- a/tests/python/frontend/nnef/cases/max_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = max_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/mean_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/mean_reduce_channel/graph.nnef deleted file mode 100644 index 06821a94ed9e4..0000000000000 --- a/tests/python/frontend/nnef/cases/mean_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = mean_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/mean_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/mean_reduce_spatial/graph.nnef deleted file mode 100644 index 42219e6e62e2d..0000000000000 --- a/tests/python/frontend/nnef/cases/mean_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = mean_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/min_2d/graph.nnef b/tests/python/frontend/nnef/cases/min_2d/graph.nnef deleted file mode 100644 index 
4c96becd5959c..0000000000000 --- a/tests/python/frontend/nnef/cases/min_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = min(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/min_4d/graph.nnef b/tests/python/frontend/nnef/cases/min_4d/graph.nnef deleted file mode 100644 index bbc28df233149..0000000000000 --- a/tests/python/frontend/nnef/cases/min_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = min(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/min_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/min_4d_broadcast/graph.nnef deleted file mode 100644 index 7befa71d83b3f..0000000000000 --- a/tests/python/frontend/nnef/cases/min_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = min(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/min_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/min_4d_constant/graph.nnef deleted file mode 100644 index 5e19520c498bd..0000000000000 --- a/tests/python/frontend/nnef/cases/min_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = min(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/min_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/min_reduce_channel/graph.nnef deleted file mode 100644 index a2ad6680ae4d3..0000000000000 --- a/tests/python/frontend/nnef/cases/min_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = min_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/min_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/min_reduce_spatial/graph.nnef deleted file mode 100644 index 08f0249c3b767..0000000000000 --- a/tests/python/frontend/nnef/cases/min_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = min_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/mul_2d/graph.nnef b/tests/python/frontend/nnef/cases/mul_2d/graph.nnef deleted file mode 100644 index 5d5720377c6ea..0000000000000 --- a/tests/python/frontend/nnef/cases/mul_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = mul(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/mul_4d/graph.nnef b/tests/python/frontend/nnef/cases/mul_4d/graph.nnef deleted file mode 100644 index ac78a91a322de..0000000000000 --- a/tests/python/frontend/nnef/cases/mul_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = mul(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/mul_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/mul_4d_broadcast/graph.nnef deleted file mode 100644 index 
682f4e893e7b6..0000000000000 --- a/tests/python/frontend/nnef/cases/mul_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = mul(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/mul_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/mul_4d_constant/graph.nnef deleted file mode 100644 index 1f6bdbb69732d..0000000000000 --- a/tests/python/frontend/nnef/cases/mul_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = mul(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/ne_2d/graph.nnef b/tests/python/frontend/nnef/cases/ne_2d/graph.nnef deleted file mode 100644 index 6a8ea3e3ee7c0..0000000000000 --- a/tests/python/frontend/nnef/cases/ne_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = ne(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/ne_4d/graph.nnef b/tests/python/frontend/nnef/cases/ne_4d/graph.nnef deleted file mode 100644 index 7dee4ad2f22c6..0000000000000 --- a/tests/python/frontend/nnef/cases/ne_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = ne(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/ne_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/ne_4d_broadcast/graph.nnef deleted file mode 100644 index 7e619bdc317af..0000000000000 --- a/tests/python/frontend/nnef/cases/ne_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = ne(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/ne_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/ne_4d_constant/graph.nnef deleted file mode 100644 index 7b0d7720eb2d7..0000000000000 --- a/tests/python/frontend/nnef/cases/ne_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = ne(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/nearest_downsample/graph.nnef b/tests/python/frontend/nnef/cases/nearest_downsample/graph.nnef deleted file mode 100644 index 8b1443c165e47..0000000000000 --- a/tests/python/frontend/nnef/cases/nearest_downsample/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = nearest_downsample(input, factor = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/nearest_upsample/graph.nnef b/tests/python/frontend/nnef/cases/nearest_upsample/graph.nnef deleted file mode 100644 index 34f6fe49e45a2..0000000000000 --- a/tests/python/frontend/nnef/cases/nearest_upsample/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = nearest_upsample(input, factor = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/neg_2d/graph.nnef b/tests/python/frontend/nnef/cases/neg_2d/graph.nnef deleted file mode 100644 index b25f97cef4659..0000000000000 --- 
a/tests/python/frontend/nnef/cases/neg_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = neg(input); -} diff --git a/tests/python/frontend/nnef/cases/neg_4d/graph.nnef b/tests/python/frontend/nnef/cases/neg_4d/graph.nnef deleted file mode 100644 index 8c752d747860e..0000000000000 --- a/tests/python/frontend/nnef/cases/neg_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = neg(input); -} diff --git a/tests/python/frontend/nnef/cases/not_2d/graph.nnef b/tests/python/frontend/nnef/cases/not_2d/graph.nnef deleted file mode 100644 index e7885f852a321..0000000000000 --- a/tests/python/frontend/nnef/cases/not_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = not(input); -} diff --git a/tests/python/frontend/nnef/cases/not_4d/graph.nnef b/tests/python/frontend/nnef/cases/not_4d/graph.nnef deleted file mode 100644 index 7544fb1394d33..0000000000000 --- a/tests/python/frontend/nnef/cases/not_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = not(input); -} diff --git a/tests/python/frontend/nnef/cases/or_2d/graph.nnef b/tests/python/frontend/nnef/cases/or_2d/graph.nnef deleted file mode 100644 index 52ec1fdbdc209..0000000000000 --- a/tests/python/frontend/nnef/cases/or_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = or(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/or_4d/graph.nnef b/tests/python/frontend/nnef/cases/or_4d/graph.nnef deleted file mode 100644 index a799707f4f80c..0000000000000 --- a/tests/python/frontend/nnef/cases/or_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = or(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/or_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/or_4d_broadcast/graph.nnef deleted file mode 100644 index 74ed77cf9587a..0000000000000 --- a/tests/python/frontend/nnef/cases/or_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = or(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/or_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/or_4d_constant/graph.nnef deleted file mode 100644 index 100aedaf2487c..0000000000000 --- a/tests/python/frontend/nnef/cases/or_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = or(input, false); -} diff --git a/tests/python/frontend/nnef/cases/pad_0-1_constant/graph.nnef b/tests/python/frontend/nnef/cases/pad_0-1_constant/graph.nnef deleted file mode 100644 index 89bf37e06c5ca..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_0-1_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (0,1), (0,1)], border = 
'constant'); -} diff --git a/tests/python/frontend/nnef/cases/pad_0-1_reflect/graph.nnef b/tests/python/frontend/nnef/cases/pad_0-1_reflect/graph.nnef deleted file mode 100644 index 2deca2e42e5fb..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_0-1_reflect/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (0,1), (0,1)], border = 'reflect'); -} diff --git a/tests/python/frontend/nnef/cases/pad_0-1_replicate/graph.nnef b/tests/python/frontend/nnef/cases/pad_0-1_replicate/graph.nnef deleted file mode 100644 index 544c5704db185..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_0-1_replicate/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (0,1), (0,1)], border = 'replicate'); -} diff --git a/tests/python/frontend/nnef/cases/pad_1-0_constant/graph.nnef b/tests/python/frontend/nnef/cases/pad_1-0_constant/graph.nnef deleted file mode 100644 index 5b36fc86d2fce..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_1-0_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/pad_1-0_reflect/graph.nnef b/tests/python/frontend/nnef/cases/pad_1-0_reflect/graph.nnef deleted file mode 100644 index d12aa22702408..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_1-0_reflect/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (1,0), (1,0)], border = 'reflect'); -} diff --git a/tests/python/frontend/nnef/cases/pad_1-0_replicate/graph.nnef b/tests/python/frontend/nnef/cases/pad_1-0_replicate/graph.nnef deleted file mode 100644 index d527f2e6fa23b..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_1-0_replicate/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (1,0), (1,0)], border = 'replicate'); -} diff --git a/tests/python/frontend/nnef/cases/pad_1-1_constant/graph.nnef b/tests/python/frontend/nnef/cases/pad_1-1_constant/graph.nnef deleted file mode 100644 index c5118096c9578..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_1-1_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/pad_1-1_reflect/graph.nnef b/tests/python/frontend/nnef/cases/pad_1-1_reflect/graph.nnef deleted file mode 100644 index fa2709ea354ba..0000000000000 --- a/tests/python/frontend/nnef/cases/pad_1-1_reflect/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (1,1), (1,1)], border = 'reflect'); -} diff --git a/tests/python/frontend/nnef/cases/pad_1-1_replicate/graph.nnef b/tests/python/frontend/nnef/cases/pad_1-1_replicate/graph.nnef deleted file mode 100644 index dcdead991e9a7..0000000000000 --- 
a/tests/python/frontend/nnef/cases/pad_1-1_replicate/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16,32,32]); - output = pad(input, padding = [(0,0), (0,0), (1,1), (1,1)], border = 'replicate'); -} diff --git a/tests/python/frontend/nnef/cases/pow_2d/graph.nnef b/tests/python/frontend/nnef/cases/pow_2d/graph.nnef deleted file mode 100644 index b07c5b61a5731..0000000000000 --- a/tests/python/frontend/nnef/cases/pow_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = pow(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/pow_4d/graph.nnef b/tests/python/frontend/nnef/cases/pow_4d/graph.nnef deleted file mode 100644 index f812848110430..0000000000000 --- a/tests/python/frontend/nnef/cases/pow_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = pow(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/pow_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/pow_4d_broadcast/graph.nnef deleted file mode 100644 index 664e8381eed5c..0000000000000 --- a/tests/python/frontend/nnef/cases/pow_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = pow(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/pow_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/pow_4d_constant/graph.nnef deleted file mode 100644 index 2d3ed54b01b56..0000000000000 --- a/tests/python/frontend/nnef/cases/pow_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = pow(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/prelu/graph.nnef b/tests/python/frontend/nnef/cases/prelu/graph.nnef deleted file mode 100644 index 04fe7c0a34642..0000000000000 --- a/tests/python/frontend/nnef/cases/prelu/graph.nnef +++ /dev/null @@ -1,11 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [16,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input1, filter, bias, groups = 0); - input2 = external(shape = [16]); - output = prelu(conv, input2); -} diff --git a/tests/python/frontend/nnef/cases/prelu_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/prelu_2d_standalone/graph.nnef deleted file mode 100644 index 1cbe5da615157..0000000000000 --- a/tests/python/frontend/nnef/cases/prelu_2d_standalone/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [16,16]); - input2 = external(shape = [16]); - output = prelu(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/prelu_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/prelu_4d_standalone/graph.nnef deleted file mode 100644 index abc6613b2ea6f..0000000000000 --- a/tests/python/frontend/nnef/cases/prelu_4d_standalone/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [16,16,32,32]); - input2 = external(shape = [16]); - 
output = prelu(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/rcp_2d/graph.nnef b/tests/python/frontend/nnef/cases/rcp_2d/graph.nnef deleted file mode 100644 index aa9db7a802914..0000000000000 --- a/tests/python/frontend/nnef/cases/rcp_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = rcp(input); -} diff --git a/tests/python/frontend/nnef/cases/rcp_4d/graph.nnef b/tests/python/frontend/nnef/cases/rcp_4d/graph.nnef deleted file mode 100644 index f5784549bec72..0000000000000 --- a/tests/python/frontend/nnef/cases/rcp_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = rcp(input); -} diff --git a/tests/python/frontend/nnef/cases/relu/graph.nnef b/tests/python/frontend/nnef/cases/relu/graph.nnef deleted file mode 100644 index 08a81ee886eed..0000000000000 --- a/tests/python/frontend/nnef/cases/relu/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = relu(conv); -} diff --git a/tests/python/frontend/nnef/cases/relu_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/relu_2d_standalone/graph.nnef deleted file mode 100644 index fdba3f74bff62..0000000000000 --- a/tests/python/frontend/nnef/cases/relu_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = relu(input); -} diff --git a/tests/python/frontend/nnef/cases/relu_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/relu_4d_standalone/graph.nnef deleted file mode 100644 index 347cf9665ae3b..0000000000000 --- a/tests/python/frontend/nnef/cases/relu_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = relu(input); -} diff --git a/tests/python/frontend/nnef/cases/reshape_flatten/graph.nnef b/tests/python/frontend/nnef/cases/reshape_flatten/graph.nnef deleted file mode 100644 index 1d39de4b26e65..0000000000000 --- a/tests/python/frontend/nnef/cases/reshape_flatten/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = reshape(input, shape = [0,-1]); -} diff --git a/tests/python/frontend/nnef/cases/reshape_partial/graph.nnef b/tests/python/frontend/nnef/cases/reshape_partial/graph.nnef deleted file mode 100644 index 50f983e266c6e..0000000000000 --- a/tests/python/frontend/nnef/cases/reshape_partial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [2,3,3,3,2]); - output = reshape(input, shape = [0,-1], axis_start = 1, axis_count = 3); -} diff --git a/tests/python/frontend/nnef/cases/reshape_squeeze/graph.nnef b/tests/python/frontend/nnef/cases/reshape_squeeze/graph.nnef deleted file mode 100644 index b8471424234a9..0000000000000 --- a/tests/python/frontend/nnef/cases/reshape_squeeze/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,1,1]); - output = reshape(input, shape = [4,16]); -} diff --git a/tests/python/frontend/nnef/cases/rms_pool3x3/graph.nnef 
b/tests/python/frontend/nnef/cases/rms_pool3x3/graph.nnef deleted file mode 100644 index bd3972de2ed1f..0000000000000 --- a/tests/python/frontend/nnef/cases/rms_pool3x3/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = rms_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); -} diff --git a/tests/python/frontend/nnef/cases/round_2d/graph.nnef b/tests/python/frontend/nnef/cases/round_2d/graph.nnef deleted file mode 100644 index 6dcc91eb50a1c..0000000000000 --- a/tests/python/frontend/nnef/cases/round_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = round(input); -} diff --git a/tests/python/frontend/nnef/cases/round_4d/graph.nnef b/tests/python/frontend/nnef/cases/round_4d/graph.nnef deleted file mode 100644 index bbbdb1bea377b..0000000000000 --- a/tests/python/frontend/nnef/cases/round_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = round(input); -} diff --git a/tests/python/frontend/nnef/cases/rsqr_2d/graph.nnef b/tests/python/frontend/nnef/cases/rsqr_2d/graph.nnef deleted file mode 100644 index 385ec228b1c64..0000000000000 --- a/tests/python/frontend/nnef/cases/rsqr_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = rsqr(input); -} diff --git a/tests/python/frontend/nnef/cases/rsqr_4d/graph.nnef b/tests/python/frontend/nnef/cases/rsqr_4d/graph.nnef deleted file mode 100644 index a462d27572da3..0000000000000 --- a/tests/python/frontend/nnef/cases/rsqr_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = rsqr(input); -} diff --git a/tests/python/frontend/nnef/cases/rsqrt_2d/graph.nnef b/tests/python/frontend/nnef/cases/rsqrt_2d/graph.nnef deleted file mode 100644 index f3503cfee649c..0000000000000 --- a/tests/python/frontend/nnef/cases/rsqrt_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = rsqrt(input); -} diff --git a/tests/python/frontend/nnef/cases/rsqrt_4d/graph.nnef b/tests/python/frontend/nnef/cases/rsqrt_4d/graph.nnef deleted file mode 100644 index 76583e05c7f6a..0000000000000 --- a/tests/python/frontend/nnef/cases/rsqrt_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = rsqrt(input); -} diff --git a/tests/python/frontend/nnef/cases/select_2d/graph.nnef b/tests/python/frontend/nnef/cases/select_2d/graph.nnef deleted file mode 100644 index a771def8b45e4..0000000000000 --- a/tests/python/frontend/nnef/cases/select_2d/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( cond, input1, input2 ) -> ( output ) -{ - cond = external(shape = [4,16]); - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = select(cond, input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/select_2d_false/graph.nnef b/tests/python/frontend/nnef/cases/select_2d_false/graph.nnef deleted file mode 100644 index 44669bc31ca11..0000000000000 --- a/tests/python/frontend/nnef/cases/select_2d_false/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) 
-{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = select(false, input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/select_2d_true/graph.nnef b/tests/python/frontend/nnef/cases/select_2d_true/graph.nnef deleted file mode 100644 index 6df5598fa1cc6..0000000000000 --- a/tests/python/frontend/nnef/cases/select_2d_true/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = select(true, input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/select_4d/graph.nnef b/tests/python/frontend/nnef/cases/select_4d/graph.nnef deleted file mode 100644 index 06ae030eb9333..0000000000000 --- a/tests/python/frontend/nnef/cases/select_4d/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( cond, input1, input2 ) -> ( output ) -{ - cond = external(shape = [4,16,32,32]); - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = select(cond, input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/select_4d_false/graph.nnef b/tests/python/frontend/nnef/cases/select_4d_false/graph.nnef deleted file mode 100644 index d2f4f45b7177e..0000000000000 --- a/tests/python/frontend/nnef/cases/select_4d_false/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = select(false, input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/select_4d_true/graph.nnef b/tests/python/frontend/nnef/cases/select_4d_true/graph.nnef deleted file mode 100644 index b6437d5953762..0000000000000 --- a/tests/python/frontend/nnef/cases/select_4d_true/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = select(true, input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/selu/graph.nnef b/tests/python/frontend/nnef/cases/selu/graph.nnef deleted file mode 100644 index cf08d103a23a2..0000000000000 --- a/tests/python/frontend/nnef/cases/selu/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = selu(conv); -} diff --git a/tests/python/frontend/nnef/cases/selu_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/selu_2d_standalone/graph.nnef deleted file mode 100644 index cfe55aa9ca323..0000000000000 --- a/tests/python/frontend/nnef/cases/selu_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16]); - output = selu(input); -} diff --git a/tests/python/frontend/nnef/cases/selu_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/selu_4d_standalone/graph.nnef deleted file mode 100644 index c8d6bd6b6d2bb..0000000000000 --- a/tests/python/frontend/nnef/cases/selu_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - output = selu(input); -} diff --git a/tests/python/frontend/nnef/cases/separable_conv3x3/graph.nnef 
b/tests/python/frontend/nnef/cases/separable_conv3x3/graph.nnef deleted file mode 100644 index 30c722ba6062c..0000000000000 --- a/tests/python/frontend/nnef/cases/separable_conv3x3/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); - point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = separable_conv(input, plane_filter, point_filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/separable_conv3x3_with_attrs/graph.nnef b/tests/python/frontend/nnef/cases/separable_conv3x3_with_attrs/graph.nnef deleted file mode 100644 index 7471ad7fce3cd..0000000000000 --- a/tests/python/frontend/nnef/cases/separable_conv3x3_with_attrs/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); - point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); - output = separable_conv(input, plane_filter, point_filter, padding = [(0,1), (0,1)], stride = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/separable_conv5x5/graph.nnef b/tests/python/frontend/nnef/cases/separable_conv5x5/graph.nnef deleted file mode 100644 index 07903799cdec9..0000000000000 --- a/tests/python/frontend/nnef/cases/separable_conv5x5/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,8,32,32]); - plane_filter = variable(shape = [8,1,5,5], label = 'plane_filter'); - point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); - bias = variable(shape = [1,16], label = 'bias'); - output = separable_conv(input, plane_filter, point_filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/separable_deconv3x3/graph.nnef b/tests/python/frontend/nnef/cases/separable_deconv3x3/graph.nnef deleted file mode 100644 index 1d830b6bba5ea..0000000000000 --- a/tests/python/frontend/nnef/cases/separable_deconv3x3/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); - point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = separable_deconv(input, plane_filter, point_filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/separable_deconv3x3_with_attrs/graph.nnef b/tests/python/frontend/nnef/cases/separable_deconv3x3_with_attrs/graph.nnef deleted file mode 100644 index 331f733d31957..0000000000000 --- a/tests/python/frontend/nnef/cases/separable_deconv3x3_with_attrs/graph.nnef +++ /dev/null @@ -1,9 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); - point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); - output = separable_deconv(input, plane_filter, point_filter, padding = [(0,1), (0,1)], stride = [2,2]); -} diff --git a/tests/python/frontend/nnef/cases/separable_deconv5x5/graph.nnef b/tests/python/frontend/nnef/cases/separable_deconv5x5/graph.nnef deleted file mode 100644 index f115a9ecc105b..0000000000000 --- a/tests/python/frontend/nnef/cases/separable_deconv5x5/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 
1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - plane_filter = variable(shape = [8,1,5,5], label = 'plane_filter'); - point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); - bias = variable(shape = [1,8], label = 'bias'); - output = separable_deconv(input, plane_filter, point_filter, bias); -} diff --git a/tests/python/frontend/nnef/cases/sigmoid/graph.nnef b/tests/python/frontend/nnef/cases/sigmoid/graph.nnef deleted file mode 100644 index 83eb12e4cdb3b..0000000000000 --- a/tests/python/frontend/nnef/cases/sigmoid/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = sigmoid(conv); -} diff --git a/tests/python/frontend/nnef/cases/sigmoid_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/sigmoid_2d_standalone/graph.nnef deleted file mode 100644 index 64ac4e44a611c..0000000000000 --- a/tests/python/frontend/nnef/cases/sigmoid_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = sigmoid(input); -} diff --git a/tests/python/frontend/nnef/cases/sigmoid_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/sigmoid_4d_standalone/graph.nnef deleted file mode 100644 index 80ddf8208c6a3..0000000000000 --- a/tests/python/frontend/nnef/cases/sigmoid_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sigmoid(input); -} diff --git a/tests/python/frontend/nnef/cases/sign_2d/graph.nnef b/tests/python/frontend/nnef/cases/sign_2d/graph.nnef deleted file mode 100644 index 77f0bf039bdd2..0000000000000 --- a/tests/python/frontend/nnef/cases/sign_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = sign(input); -} diff --git a/tests/python/frontend/nnef/cases/sign_4d/graph.nnef b/tests/python/frontend/nnef/cases/sign_4d/graph.nnef deleted file mode 100644 index 1e0e429c4a522..0000000000000 --- a/tests/python/frontend/nnef/cases/sign_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sign(input); -} diff --git a/tests/python/frontend/nnef/cases/silu/graph.nnef b/tests/python/frontend/nnef/cases/silu/graph.nnef deleted file mode 100644 index b3209da214c70..0000000000000 --- a/tests/python/frontend/nnef/cases/silu/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = silu(conv); -} diff --git a/tests/python/frontend/nnef/cases/silu_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/silu_2d_standalone/graph.nnef deleted file mode 100644 index c307794e1c375..0000000000000 --- a/tests/python/frontend/nnef/cases/silu_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16]); - output = silu(input); -} diff --git a/tests/python/frontend/nnef/cases/silu_4d_standalone/graph.nnef 
b/tests/python/frontend/nnef/cases/silu_4d_standalone/graph.nnef deleted file mode 100644 index a36fa0e18c58a..0000000000000 --- a/tests/python/frontend/nnef/cases/silu_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,16,32,32]); - output = silu(input); -} diff --git a/tests/python/frontend/nnef/cases/sin_2d/graph.nnef b/tests/python/frontend/nnef/cases/sin_2d/graph.nnef deleted file mode 100644 index 3fb5738babd46..0000000000000 --- a/tests/python/frontend/nnef/cases/sin_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = sin(input); -} diff --git a/tests/python/frontend/nnef/cases/sin_4d/graph.nnef b/tests/python/frontend/nnef/cases/sin_4d/graph.nnef deleted file mode 100644 index ce3cffc0ba30a..0000000000000 --- a/tests/python/frontend/nnef/cases/sin_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sin(input); -} diff --git a/tests/python/frontend/nnef/cases/sinh_2d/graph.nnef b/tests/python/frontend/nnef/cases/sinh_2d/graph.nnef deleted file mode 100644 index 2c00c7ab9ca5c..0000000000000 --- a/tests/python/frontend/nnef/cases/sinh_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = sinh(input); -} diff --git a/tests/python/frontend/nnef/cases/sinh_4d/graph.nnef b/tests/python/frontend/nnef/cases/sinh_4d/graph.nnef deleted file mode 100644 index a7df179fa543d..0000000000000 --- a/tests/python/frontend/nnef/cases/sinh_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sinh(input); -} diff --git a/tests/python/frontend/nnef/cases/slice/graph.nnef b/tests/python/frontend/nnef/cases/slice/graph.nnef deleted file mode 100644 index 52f7ac48ab357..0000000000000 --- a/tests/python/frontend/nnef/cases/slice/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = slice(input, axes = [2,3], begin = [1,2], end = [-1,-2]); -} diff --git a/tests/python/frontend/nnef/cases/slice_strides/graph.nnef b/tests/python/frontend/nnef/cases/slice_strides/graph.nnef deleted file mode 100644 index 1f35e7e1758bb..0000000000000 --- a/tests/python/frontend/nnef/cases/slice_strides/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = slice(input, axes = [1,2,3], begin = [5,16,2], end = [1,4,-1], stride = [-1,-1,1]); -} diff --git a/tests/python/frontend/nnef/cases/softmax/graph.nnef b/tests/python/frontend/nnef/cases/softmax/graph.nnef deleted file mode 100644 index ab0d00b1a27ad..0000000000000 --- a/tests/python/frontend/nnef/cases/softmax/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = softmax(conv, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/softmax_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/softmax_2d_standalone/graph.nnef deleted file mode 100644 index 76e2410a695e1..0000000000000 --- 
a/tests/python/frontend/nnef/cases/softmax_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = softmax(input); -} diff --git a/tests/python/frontend/nnef/cases/softmax_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/softmax_4d_standalone/graph.nnef deleted file mode 100644 index 0eb2191f81eba..0000000000000 --- a/tests/python/frontend/nnef/cases/softmax_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = softmax(input); -} diff --git a/tests/python/frontend/nnef/cases/softplus/graph.nnef b/tests/python/frontend/nnef/cases/softplus/graph.nnef deleted file mode 100644 index 9c4c1f15b7c49..0000000000000 --- a/tests/python/frontend/nnef/cases/softplus/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = softplus(conv); -} diff --git a/tests/python/frontend/nnef/cases/softplus_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/softplus_2d_standalone/graph.nnef deleted file mode 100644 index fca49a128dfde..0000000000000 --- a/tests/python/frontend/nnef/cases/softplus_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = softplus(input); -} diff --git a/tests/python/frontend/nnef/cases/softplus_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/softplus_4d_standalone/graph.nnef deleted file mode 100644 index 14972ff7530d2..0000000000000 --- a/tests/python/frontend/nnef/cases/softplus_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = softplus(input); -} diff --git a/tests/python/frontend/nnef/cases/split_channel/graph.nnef b/tests/python/frontend/nnef/cases/split_channel/graph.nnef deleted file mode 100644 index ae48d85891d7c..0000000000000 --- a/tests/python/frontend/nnef/cases/split_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output1, output2 ) -{ - input = external(shape = [4,16,32,32]); - [output1, output2] = split(input, axis = 1, ratios = [1,1]); -} diff --git a/tests/python/frontend/nnef/cases/split_unbalanced/graph.nnef b/tests/python/frontend/nnef/cases/split_unbalanced/graph.nnef deleted file mode 100644 index d3dda048014c9..0000000000000 --- a/tests/python/frontend/nnef/cases/split_unbalanced/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output1, output2, output3 ) -{ - input = external(shape = [4,32,3]); - [output1, output2, output3] = split(input, axis = 1, ratios = [3,1,4]); -} diff --git a/tests/python/frontend/nnef/cases/sqr_2d/graph.nnef b/tests/python/frontend/nnef/cases/sqr_2d/graph.nnef deleted file mode 100644 index b1b3fe4848a8a..0000000000000 --- a/tests/python/frontend/nnef/cases/sqr_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = sqr(input); -} diff --git a/tests/python/frontend/nnef/cases/sqr_4d/graph.nnef b/tests/python/frontend/nnef/cases/sqr_4d/graph.nnef deleted file mode 100644 index 297c1f264e34b..0000000000000 --- 
a/tests/python/frontend/nnef/cases/sqr_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sqr(input); -} diff --git a/tests/python/frontend/nnef/cases/sqrt_2d/graph.nnef b/tests/python/frontend/nnef/cases/sqrt_2d/graph.nnef deleted file mode 100644 index 5c00df4616862..0000000000000 --- a/tests/python/frontend/nnef/cases/sqrt_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = sqrt(input); -} diff --git a/tests/python/frontend/nnef/cases/sqrt_4d/graph.nnef b/tests/python/frontend/nnef/cases/sqrt_4d/graph.nnef deleted file mode 100644 index 03d5845d43dc0..0000000000000 --- a/tests/python/frontend/nnef/cases/sqrt_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sqrt(input); -} diff --git a/tests/python/frontend/nnef/cases/squeeze_spatial/graph.nnef b/tests/python/frontend/nnef/cases/squeeze_spatial/graph.nnef deleted file mode 100644 index da182b5fb217b..0000000000000 --- a/tests/python/frontend/nnef/cases/squeeze_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,1,1]); - output = squeeze(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/stack/graph.nnef b/tests/python/frontend/nnef/cases/stack/graph.nnef deleted file mode 100644 index aaf3e0c3b92ec..0000000000000 --- a/tests/python/frontend/nnef/cases/stack/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = stack([input1, input2], axis = 1); -} diff --git a/tests/python/frontend/nnef/cases/sub_2d/graph.nnef b/tests/python/frontend/nnef/cases/sub_2d/graph.nnef deleted file mode 100644 index b3c33a2cf8826..0000000000000 --- a/tests/python/frontend/nnef/cases/sub_2d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16]); - input2 = external(shape = [4,16]); - output = sub(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/sub_4d/graph.nnef b/tests/python/frontend/nnef/cases/sub_4d/graph.nnef deleted file mode 100644 index ff8a068e4f27b..0000000000000 --- a/tests/python/frontend/nnef/cases/sub_4d/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [4,16,32,32]); - output = sub(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/sub_4d_broadcast/graph.nnef b/tests/python/frontend/nnef/cases/sub_4d_broadcast/graph.nnef deleted file mode 100644 index 1ffec0c486acc..0000000000000 --- a/tests/python/frontend/nnef/cases/sub_4d_broadcast/graph.nnef +++ /dev/null @@ -1,8 +0,0 @@ -version 1.0; - -graph G( input1, input2 ) -> ( output ) -{ - input1 = external(shape = [4,16,32,32]); - input2 = external(shape = [1,16,1,1]); - output = sub(input1, input2); -} diff --git a/tests/python/frontend/nnef/cases/sub_4d_constant/graph.nnef b/tests/python/frontend/nnef/cases/sub_4d_constant/graph.nnef deleted file mode 100644 index c9c6abf4951ea..0000000000000 --- a/tests/python/frontend/nnef/cases/sub_4d_constant/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = 
external(shape = [4,16,32,32]); - output = sub(input, 0.5); -} diff --git a/tests/python/frontend/nnef/cases/sum_reduce_channel/graph.nnef b/tests/python/frontend/nnef/cases/sum_reduce_channel/graph.nnef deleted file mode 100644 index ba9154a2e7157..0000000000000 --- a/tests/python/frontend/nnef/cases/sum_reduce_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sum_reduce(input, axes = [1]); -} diff --git a/tests/python/frontend/nnef/cases/sum_reduce_spatial/graph.nnef b/tests/python/frontend/nnef/cases/sum_reduce_spatial/graph.nnef deleted file mode 100644 index b46afa6237541..0000000000000 --- a/tests/python/frontend/nnef/cases/sum_reduce_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = sum_reduce(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/tan_2d/graph.nnef b/tests/python/frontend/nnef/cases/tan_2d/graph.nnef deleted file mode 100644 index af203dcb8a4df..0000000000000 --- a/tests/python/frontend/nnef/cases/tan_2d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = tan(input); -} diff --git a/tests/python/frontend/nnef/cases/tan_4d/graph.nnef b/tests/python/frontend/nnef/cases/tan_4d/graph.nnef deleted file mode 100644 index 6b039dd270baa..0000000000000 --- a/tests/python/frontend/nnef/cases/tan_4d/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = tan(input); -} diff --git a/tests/python/frontend/nnef/cases/tanh/graph.nnef b/tests/python/frontend/nnef/cases/tanh/graph.nnef deleted file mode 100644 index 1d39aec99c8c9..0000000000000 --- a/tests/python/frontend/nnef/cases/tanh/graph.nnef +++ /dev/null @@ -1,10 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - filter = constant(shape = [16,1,1,1], value = [1.0]); - bias = constant(shape = [1,16], value = [0.0]); - conv = conv(input, filter, bias, groups = 0); - output = tanh(conv); -} diff --git a/tests/python/frontend/nnef/cases/tanh_2d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/tanh_2d_standalone/graph.nnef deleted file mode 100644 index a5dae283dfadc..0000000000000 --- a/tests/python/frontend/nnef/cases/tanh_2d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = tanh(input); -} diff --git a/tests/python/frontend/nnef/cases/tanh_4d_standalone/graph.nnef b/tests/python/frontend/nnef/cases/tanh_4d_standalone/graph.nnef deleted file mode 100644 index 7c9ee3a6c14a8..0000000000000 --- a/tests/python/frontend/nnef/cases/tanh_4d_standalone/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = tanh(input); -} diff --git a/tests/python/frontend/nnef/cases/tile_batch/graph.nnef b/tests/python/frontend/nnef/cases/tile_batch/graph.nnef deleted file mode 100644 index 853f7789e5002..0000000000000 --- a/tests/python/frontend/nnef/cases/tile_batch/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [1,16]); - output = tile(input, repeats = [16,1]); -} diff --git a/tests/python/frontend/nnef/cases/tile_channel/graph.nnef 
b/tests/python/frontend/nnef/cases/tile_channel/graph.nnef deleted file mode 100644 index bddc2f13ad5fd..0000000000000 --- a/tests/python/frontend/nnef/cases/tile_channel/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [16,1]); - output = tile(input, repeats = [1,16]); -} diff --git a/tests/python/frontend/nnef/cases/tile_spatial/graph.nnef b/tests/python/frontend/nnef/cases/tile_spatial/graph.nnef deleted file mode 100644 index 6f44e98470835..0000000000000 --- a/tests/python/frontend/nnef/cases/tile_spatial/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = tile(input, repeats = [1,1,3,3]); -} diff --git a/tests/python/frontend/nnef/cases/transpose_nchw_to_nhwc/graph.nnef b/tests/python/frontend/nnef/cases/transpose_nchw_to_nhwc/graph.nnef deleted file mode 100644 index 7e6dbd6a7668b..0000000000000 --- a/tests/python/frontend/nnef/cases/transpose_nchw_to_nhwc/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16,32,32]); - output = transpose(input, axes = [0,2,3,1]); -} diff --git a/tests/python/frontend/nnef/cases/transpose_nhwc_to_nchw/graph.nnef b/tests/python/frontend/nnef/cases/transpose_nhwc_to_nchw/graph.nnef deleted file mode 100644 index 0e6f5172989a0..0000000000000 --- a/tests/python/frontend/nnef/cases/transpose_nhwc_to_nchw/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,32,32,16]); - output = transpose(input, axes = [0,3,1,2]); -} diff --git a/tests/python/frontend/nnef/cases/unsqueeze/graph.nnef b/tests/python/frontend/nnef/cases/unsqueeze/graph.nnef deleted file mode 100644 index ede2811723f88..0000000000000 --- a/tests/python/frontend/nnef/cases/unsqueeze/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output ) -{ - input = external(shape = [4,16]); - output = unsqueeze(input, axes = [2,3]); -} diff --git a/tests/python/frontend/nnef/cases/unstack/graph.nnef b/tests/python/frontend/nnef/cases/unstack/graph.nnef deleted file mode 100644 index 1c37b792c4c66..0000000000000 --- a/tests/python/frontend/nnef/cases/unstack/graph.nnef +++ /dev/null @@ -1,7 +0,0 @@ -version 1.0; - -graph G( input ) -> ( output1, output2, output3 ) -{ - input = external(shape = [4,3,16]); - [output1, output2, output3] = unstack(input, axis = 1); -} diff --git a/tests/python/nightly/frontend/nnef/cases_string.py b/tests/python/nightly/frontend/nnef/cases_string.py new file mode 100644 index 0000000000000..71f61251c2f0c --- /dev/null +++ b/tests/python/nightly/frontend/nnef/cases_string.py @@ -0,0 +1,3218 @@ +""" +NNEF frontend graph definitions for test cases +""" + + +# pylint: disable=line-too-long + +gt_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = gt(input1, input2); +} +""" + +max_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = max(input1, input2); +} +""" + +local_contrast_normalization = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = local_contrast_normalization(input, size = [1, 1, 3, 3], bias = 1.0, epsilon = 1e-5); +} +""" + +mean_reduce_spatial = """ +version 1.0; + +graph G( input ) 
-> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = mean_reduce(input, axes = [2,3]); +} +""" + +select_4d = """ +version 1.0; + +graph G( cond, input1, input2 ) -> ( output ) +{ + cond = external(shape = [4,16,32,32]); + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = select(cond, input1, input2); +} +""" + +max_pool3x3_pad1_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'ignore'); +} +""" + +relu = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = relu(conv); +} +""" + +atanh_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = atanh(input); +} +""" + +split_channel = """ +version 1.0; + +graph G( input ) -> ( output1, output2 ) +{ + input = external(shape = [4,16,32,32]); + [output1, output2] = split(input, axis = 1, ratios = [1,1]); +} +""" + +rcp_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = rcp(input); +} +""" + +max_pool2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'ignore'); +} +""" + +silu_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + output = silu(input); +} +""" + +avg_pool2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'constant'); +} +""" + +separable_deconv5x5 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + plane_filter = variable(shape = [8,1,5,5], label = 'plane_filter'); + point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = separable_deconv(input, plane_filter, point_filter, bias); +} +""" + +slice_strides = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = slice(input, axes = [1,2,3], begin = [5,16,2], end = [1,4,-1], stride = [-1,-1,1]); +} +""" + +matmul_4d_transpose = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = matmul(input1, input2, transposeA = true, transposeB = false); +} +""" + +rcp_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = rcp(input); +} +""" + +log2_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = log2(input); +} +""" + +conv3x3_stride2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, stride = [2,2]); +} +""" + +lt_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = lt(input, 0.5); +} 
+""" + +or_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = or(input1, input2); +} +""" + +tan_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = tan(input); +} +""" + +deconv7x7 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,7,7], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias); +} +""" + +acos_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = acos(input); +} +""" + +nearest_upsample = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = nearest_upsample(input, factor = [2,2]); +} +""" + +ceil_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = ceil(input); +} +""" + +floor_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = floor(input); +} +""" + +avg_pool1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,1,1], stride = [1,1,2,2], border = 'constant'); +} +""" + +log_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = log(input); +} +""" + +sum_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sum_reduce(input, axes = [1]); +} +""" + +min_reduce_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = min_reduce(input, axes = [2,3]); +} +""" + +asinh_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = asinh(input); +} +""" + +max_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = max(input1, input2); +} +""" + +max_pool3x3_pad0_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'ignore'); +} +""" + +cos_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = cos(input); +} +""" + +not_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = not(input); +} +""" + +sub_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = sub(input1, input2); +} +""" + +bilinear_upsample_aligned_replicate = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = multilinear_upsample(input, factor = [2,2], method = 'aligned', border = 'replicate'); +} +""" + +log_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = log(input); +} +""" + +argmin_reduce_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = argmin_reduce(input, axes = [2,3]); +} +""" + 
+selu_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + output = selu(input); +} +""" + +select_2d = """ +version 1.0; + +graph G( cond, input1, input2 ) -> ( output ) +{ + cond = external(shape = [4,16]); + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = select(cond, input1, input2); +} +""" + +prelu = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [16,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input1, filter, bias, groups = 0); + input2 = external(shape = [16]); + output = prelu(conv, input2); +} +""" + +ne_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = ne(input1, input2); +} +""" + +or_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = or(input1, input2); +} +""" + +eq_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = eq(input1, input2); +} +""" + +rsqr_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = rsqr(input); +} +""" + +eq_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = eq(input1, input2); +} +""" + +deconv7x7_stride4x4 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,7,7], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, stride = [4,4]); +} +""" + +max_pool3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'ignore'); +} +""" + +and_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = and(input1, input2); +} +""" + +atan_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = atan(input); +} +""" + +pad_0_1_reflect = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (0,1), (0,1)], border = 'reflect'); +} +""" + +mul_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = mul(input1, input2); +} +""" + +softmax = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = softmax(conv, axes = [1]); +} +""" + +sign_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sign(input); +} +""" + +mul_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = mul(input, 0.5); +} +""" + +le_4d_constant = """ +version 
1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = le(input, 0.5); +} +""" + +box2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'constant'); +} +""" + +or_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = or(input1, input2); +} +""" + +deconv5x5 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,5,5], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias); +} +""" + +box3x3_pad1_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); +} +""" + +debox3x3_pad1_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); +} +""" + +ge_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = ge(input1, input2); +} +""" + +linear_reshape = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,8,8]); + weights = variable(shape = [32,1024], label = 'weights'); + bias = variable(shape = [1,32], label = 'bias'); + flattened = reshape(input, shape = [0,-1]); + output = linear(flattened, weights, bias); +} +""" + +le_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = le(input1, input2); +} +""" + +deconv3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias); +} +""" + +nearest_downsample = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = nearest_downsample(input, factor = [2,2]); +} +""" + +select_4d_true = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = select(true, input1, input2); +} +""" + +min_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = min(input1, input2); +} +""" + +max_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = max(input1, input2); +} +""" + +max_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max(input, 0.5); +} +""" + +sum_reduce_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sum_reduce(input, axes = [2,3]); +} +""" + +min_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = 
external(shape = [4,16]); + output = min(input1, input2); +} +""" + +ge_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = ge(input1, input2); +} +""" + +conv2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,2,2], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias); +} +""" + +conv4x4_stride2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,4,4], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, stride = [2,2]); +} +""" + +debox1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,1,1], stride = [1,1,2,2], padding = [(0,0),(0,0),(0,-1),(0,-1)], border = 'constant'); +} +""" + +reshape_flatten = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = reshape(input, shape = [0,-1]); +} +""" + +conv3x3_nobias = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + output = conv(input, filter, 0.0); +} +""" + +sinh_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sinh(input); +} +""" + +selu = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = selu(conv); +} +""" + +prelu_4d_standalone = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [16,16,32,32]); + input2 = external(shape = [16]); + output = prelu(input1, input2); +} +""" + +tile_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = tile(input, repeats = [1,1,3,3]); +} +""" + +softmax_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = softmax(input); +} +""" + +rsqrt_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = rsqrt(input); +} +""" + +concat_channel = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = concat([input1, input2], axis = 1); +} +""" + +area_downsample = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = area_downsample(input, factor = [2,2]); +} +""" + +elu = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = elu(conv); +} +""" + +max_pool3x3_pad1_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'ignore'); +} +""" + +sigmoid_2d_standalone = """ +version 1.0; + 
+graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = sigmoid(input); +} +""" + +ne_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = ne(input, 0.5); +} +""" + +conv3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias); +} +""" + +all_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = all_reduce(input, axes = [1]); +} +""" + +squeeze_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,1,1]); + output = squeeze(input, axes = [2,3]); +} +""" + +and_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = and(input, false); +} +""" + +l1_normalization = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = l1_normalization(input, axes = [1], bias = 1.0, epsilon = 1e-5); +} +""" + +max_pool3x3_constant_border = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); +} +""" + +argmax_reduce_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = argmax_reduce(input, axes = [2,3]); +} +""" + +cos_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = cos(input); +} +""" + +sqr_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sqr(input); +} +""" + +rsqrt_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = rsqrt(input); +} +""" + +acos_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = acos(input); +} +""" + +bilinear_upsample_symmetric_replicate = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = multilinear_upsample(input, factor = [2,2], method = 'symmetric', border = 'replicate'); +} +""" + +asinh_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = asinh(input); +} +""" + +tile_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,1]); + output = tile(input, repeats = [1,16]); +} +""" + +cosh_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = cosh(input); +} +""" + +div_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = div(input1, input2); +} +""" + +sqrt_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = sqrt(input); +} +""" + +and_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = and(input1, input2); +} +""" + +transpose_nhwc_to_nchw = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,32,32,16]); + output = 
transpose(input, axes = [0,3,1,2]); +} +""" + +avg_pool3x3_pad0_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'constant'); +} +""" + +round_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = round(input); +} +""" + +box3x3_pad0_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'ignore'); +} +""" + +deconv6x6 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,6,6], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias); +} +""" + +atan_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = atan(input); +} +""" + +add_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = add(input, 0.5); +} +""" + +lt_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = lt(input1, input2); +} +""" + +min_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = min(input1, input2); +} +""" + +box3x3_stride1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'constant'); +} +""" + +linear_nobias = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + weights = variable(shape = [32,16], label = 'weights'); + output = linear(input, weights, 0.0); +} +""" + +div_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = div(input1, input2); +} +""" + +avg_pool3x3_stride1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'constant'); +} +""" + +conv7x7 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,7,7], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias); +} +""" + +conv3x3_groups0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,1,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, groups = 0); +} +""" + +mul_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = mul(input1, input2); +} +""" + +deconv3x3_pad1_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, padding = [(1,0), (1,0)]); +} +""" + +ne_2d = """ +version 1.0; + +graph G( 
input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = ne(input1, input2); +} +""" + +tan_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = tan(input); +} +""" + +avg_pool3x3_pad1_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); +} +""" + +mean_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = mean_reduce(input, axes = [1]); +} +""" + +softplus_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = softplus(input); +} +""" + +conv5x5 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,5,5], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias); +} +""" + +max_pool3x3_stride1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'ignore'); +} +""" + +pad_1_0_reflect = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (1,0), (1,0)], border = 'reflect'); +} +""" + +pad_1_0_replicate = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (1,0), (1,0)], border = 'replicate'); +} +""" + +separable_conv5x5 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + plane_filter = variable(shape = [8,1,5,5], label = 'plane_filter'); + point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = separable_conv(input, plane_filter, point_filter, bias); +} +""" + +debox3x3_pad1_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); +} +""" + +avg_pool3x3_pad1_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); +} +""" + +bilinear_upsample_symmetric_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = multilinear_upsample(input, factor = [2,2], method = 'symmetric', border = 'constant'); +} +""" + +gt_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = gt(input1, input2); +} +""" + +tanh_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = tanh(input); +} +""" + +acosh_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = acosh(input); +} +""" + +asin_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = asin(input); 
+} +""" + +add_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = add(input1, input2); +} +""" + +rsqr_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = rsqr(input); +} +""" + +div_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = div(input1, input2); +} +""" + +eq_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = eq(input1, input2); +} +""" + +conv3x3_valid = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, padding = [(0,0), (0,0)]); +} +""" + +min_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = min(input, 0.5); +} +""" + +separable_deconv3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); + point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = separable_deconv(input, plane_filter, point_filter, bias); +} +""" + +asin_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = asin(input); +} +""" + +or_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = or(input, false); +} +""" + +min_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = min_reduce(input, axes = [1]); +} +""" + +silu = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = silu(conv); +} +""" + +max_reduce_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_reduce(input, axes = [2,3]); +} +""" + +bilinear_upsample_asymmetric_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = multilinear_upsample(input, factor = [2,2], method = 'asymmetric', border = 'constant'); +} +""" + +gelu = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = gelu(conv); +} +""" + +clamp_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = clamp(input, 0.25, 0.75); +} +""" + +conv3x3_pad0_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, padding = [(0,0), (0,0)]); +} +""" + +conv3x3_pad1_0 = """ 
+version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, padding = [(1,0), (1,0)]); +} +""" + +abs_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = abs(input); +} +""" + +max_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_reduce(input, axes = [1]); +} +""" + +ge_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = ge(input, 0.5); +} +""" + +pad_1_1_reflect = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (1,1), (1,1)], border = 'reflect'); +} +""" + +elu_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + output = elu(input); +} +""" + +cosh_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = cosh(input); +} +""" + +transpose_nchw_to_nhwc = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = transpose(input, axes = [0,2,3,1]); +} +""" + +deconv3x3_pad1_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, padding = [(1,1), (1,1)]); +} +""" + +ne_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = ne(input1, input2); +} +""" + +sqr_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = sqr(input); +} +""" + +conv3x3_pad1_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, padding = [(1,1), (1,1)]); +} +""" + +clamp_4d = """ +version 1.0; + +graph G( input1, input2, input3 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + input3 = external(shape = [4,16,32,32]); + output = clamp(input1, input2, input3); +} +""" + +bilinear_upsample_aligned_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = multilinear_upsample(input, factor = [2,2], method = 'aligned', border = 'constant'); +} +""" + +stack = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = stack([input1, input2], axis = 1); +} +""" + +log2_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = log2(input); +} +""" + +slice = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = slice(input, axes = [2,3], begin = [1,2], end = [-1,-2]); +} +""" + +deconv2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,2,2], 
label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias); +} +""" + +all_reduce_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = all_reduce(input, axes = [2,3]); +} +""" + +unstack = """ +version 1.0; + +graph G( input ) -> ( output1, output2, output3 ) +{ + input = external(shape = [4,3,16]); + [output1, output2, output3] = unstack(input, axis = 1); +} +""" + +sqrt_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sqrt(input); +} +""" + +l2_normalization = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = l2_normalization(input, axes = [1], epsilon = 1e-3); +} +""" + +conv7x7_stride4x4 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,7,7], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, stride = [4,4]); +} +""" + +ge_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = ge(input1, input2); +} +""" + +any_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = any_reduce(input, axes = [1]); +} +""" + +leaky_relu = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = leaky_relu(conv, alpha = 0.5); +} +""" + +and_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = and(input1, input2); +} +""" + +sinh_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = sinh(input); +} +""" + +add_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = add(input1, input2); +} +""" + +copy_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = copy(input); +} +""" + +separable_conv3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); + point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = separable_conv(input, plane_filter, point_filter, bias); +} +""" + +ceil_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = ceil(input); +} +""" + +linear_squeeze = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,1,1]); + weights = variable(shape = [32,16], label = 'weights'); + bias = variable(shape = [1,32], label = 'bias'); + squeezed = squeeze(input, axes = [2,3]); + output = linear(squeezed, weights, bias); +} +""" + +acosh_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = acosh(input); +} +""" + +sub_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = 
[4,16]); + input2 = external(shape = [4,16]); + output = sub(input1, input2); +} +""" + +deconv3x3_valid = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, padding = [(0,0), (0,0)]); +} +""" + +pow_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = pow(input1, input2); +} +""" + +pad_1_1_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); +} +""" + +clamp_2d = """ +version 1.0; + +graph G( input1, input2, input3 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + input3 = external(shape = [4,16]); + output = clamp(input1, input2, input3); +} +""" + +debox3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); +} +""" + +conv1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,1,1], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias); +} +""" + +exp_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = exp(input); +} +""" + +avg_pool3x3_ignore_border = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'ignore'); +} +""" + +deconv3x3_pad0_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, padding = [(0,0), (0,0)]); +} +""" + +leaky_relu_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + output = leaky_relu(input, alpha = 0.5); +} +""" + +pow_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = pow(input1, input2); +} +""" + +abs_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = abs(input); +} +""" + +sin_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sin(input); +} +""" + +select_2d_true = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = select(true, input1, input2); +} +""" + +relu_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = relu(input); +} +""" + +reshape_squeeze = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,1,1]); + output = reshape(input, shape = [4,16]); +} +""" + +selu_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16]); + output = selu(input); +} +""" + +sub_4d_constant = """ +version 1.0; 
+ +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sub(input, 0.5); +} +""" + +linear = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + weights = variable(shape = [32,16], label = 'weights'); + bias = variable(shape = [1,32], label = 'bias'); + output = linear(input, weights, bias); +} +""" + +atanh_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = atanh(input); +} +""" + +pow_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = pow(input1, input2); +} +""" + +rms_pool3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = rms_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); +} +""" + +debox3x3_pad0_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,1), (0,1)], border = 'ignore'); +} +""" + +floor_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = floor(input); +} +""" + +deconv3x3_nobias = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + output = deconv(input, filter, 0.0); +} +""" + +batch_norm = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + mean = variable(shape = [1,16], label = 'mean'); + variance = variable(shape = [1,16], label = 'variance'); + offset = variable(shape = [1,16], label = 'offset'); + scale = variable(shape = [1,16], label = 'scale'); + output = batch_normalization(input, mean, variance, offset, scale, epsilon = 1e-3); +} +""" + +deconv3x3_stride2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, stride = [2,2]); +} +""" + +debox2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,2,2], stride = [1,1,2,2], border = 'constant'); +} +""" + +gelu_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16]); + output = gelu(input); +} +""" + +pad_0_1_replicate = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (0,1), (0,1)], border = 'replicate'); +} +""" + +mul_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = mul(input1, input2); +} +""" + +local_mean_normalization = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = local_mean_normalization(input, size = [1, 1, 3, 3]); +} +""" + +debox3x3_pad0_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'constant'); +} +""" + +reshape_partial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + 
input = external(shape = [2,3,3,3,2]); + output = reshape(input, shape = [0,-1], axis_start = 1, axis_count = 3); +} +""" + +argmin_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = argmin_reduce(input, axes = [1]); +} +""" + +softplus = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = softplus(conv); +} +""" + +copy_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = copy(input); +} +""" + +local_variance_normalization = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = local_variance_normalization(input, size = [1, 1, 3, 3], bias = 1.0, epsilon = 1e-5); +} +""" + +not_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = not(input); +} +""" + +sigmoid_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = sigmoid(input); +} +""" + +local_response_normalization = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = local_response_normalization(input, alpha = 1e-05, beta = 0.75, bias = 1.0, size = [1, 5, 1, 1]); +} +""" + +gelu_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16,32,32]); + output = gelu(input); +} +""" + +separable_conv3x3_with_attrs = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); + point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); + output = separable_conv(input, plane_filter, point_filter, padding = [(0,1), (0,1)], stride = [2,2]); +} +""" + +exp_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = exp(input); +} +""" + +lt_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = lt(input1, input2); +} +""" + +conv4x4 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,4,4], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias); +} +""" + +avg_pool3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); +} +""" + +avg_pool3x3_pad0_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = avg_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'constant'); +} +""" + +conv3x3_pad0_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, padding = [(0,1), (0,1)]); +} +""" + +pad_0_1_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = 
pad(input, padding = [(0,0), (0,0), (0,1), (0,1)], border = 'constant'); +} +""" + +deconv4x4 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,4,4], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias); +} +""" + +neg_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = neg(input); +} +""" + +bilinear_upsample_asymmetric_replicate = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = multilinear_upsample(input, factor = [2,2], method = 'asymmetric', border = 'replicate'); +} +""" + +conv5x5_stride3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,5,5], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, stride = [3,3]); +} +""" + +relu_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = relu(input); +} +""" + +max_pool1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,1,1], stride = [1,1,2,2], border = 'ignore'); +} +""" + +deconv5x5_pad2_2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,5,5], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, padding = [(2,2), (2,2)]); +} +""" + +tile_batch = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16]); + output = tile(input, repeats = [16,1]); +} +""" + +eq_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = eq(input, 0.5); +} +""" + +elu_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16]); + output = elu(input); +} +""" + +lt_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = lt(input1, input2); +} +""" + +deconv1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,1,1], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias); +} +""" + +sign_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = sign(input); +} +""" + +leaky_relu_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16]); + output = leaky_relu(input, alpha = 0.5); +} +""" + +select_2d_false = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = select(false, input1, input2); +} +""" + +div_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = div(input, 0.5); +} +""" + +softplus_4d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = softplus(input); +} +""" + +pow_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = 
[4,16,32,32]); + output = pow(input, 0.5); +} +""" + +round_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = round(input); +} +""" + +debox3x3_stride1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = debox(input, size = [1,1,3,3], stride = [1,1,1,1], border = 'constant'); +} +""" + +separable_deconv3x3_with_attrs = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + plane_filter = variable(shape = [8,1,3,3], label = 'plane_filter'); + point_filter = variable(shape = [16,8,1,1], label = 'point_filter'); + output = separable_deconv(input, plane_filter, point_filter, padding = [(0,1), (0,1)], stride = [2,2]); +} +""" + +matmul_2d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [16,4]); + output = matmul(input1, input2); +} +""" + +deconv5x5_stride3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,5,5], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, stride = [3,3]); +} +""" + +sub_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = sub(input1, input2); +} +""" + +matmul_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = matmul(input1, input2); +} +""" + +any_reduce_spatial = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = any_reduce(input, axes = [2,3]); +} +""" + +gt_4d_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = gt(input, 0.5); +} +""" + +conv6x6 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,6,6], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias); +} +""" + +le_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = le(input1, input2); +} +""" + +gt_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = gt(input1, input2); +} +""" + +deconv4x4_stride2x2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,4,4], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, stride = [2,2]); +} +""" + +le_4d_broadcast = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [1,16,1,1]); + output = le(input1, input2); +} +""" + +tanh_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = tanh(input); +} +""" + +split_unbalanced = """ +version 1.0; + +graph G( input ) -> ( output1, output2, output3 ) +{ + input = external(shape = [4,32,3]); + [output1, output2, output3] = split(input, axis = 1, ratios = [3,1,4]); 
+} +""" + +box3x3 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,3,3], stride = [1,1,2,2], border = 'constant'); +} +""" + +select_4d_false = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = select(false, input1, input2); +} +""" + +tanh = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = tanh(conv); +} +""" + +sin_2d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = sin(input); +} +""" + +box3x3_pad0_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'constant'); +} +""" + +box1x1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,1,1], stride = [1,1,2,2], border = 'constant'); +} +""" + +box3x3_pad1_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = box(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (1,1), (1,1)], border = 'constant'); +} +""" + +conv5x5_pad2_2 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,8,32,32]); + filter = variable(shape = [16,8,5,5], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = conv(input, filter, bias, padding = [(2,2), (2,2)]); +} +""" + +prelu_2d_standalone = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [16,16]); + input2 = external(shape = [16]); + output = prelu(input1, input2); +} +""" + +max_pool3x3_pad0_0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = max_pool(input, size = [1,1,3,3], stride = [1,1,2,2], padding = [(0,0), (0,0), (0,0), (0,0)], border = 'ignore'); +} +""" + +softmax_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = softmax(input); +} +""" + +matmul_2d_transpose = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16]); + input2 = external(shape = [4,16]); + output = matmul(input1, input2, transposeA = true, transposeB = false); +} +""" + +silu_2d_standalone = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [16,16]); + output = silu(input); +} +""" + +deconv3x3_groups0 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,1,3,3], label = 'filter'); + bias = variable(shape = [1,16], label = 'bias'); + output = deconv(input, filter, bias, groups = 0); +} +""" + +deconv3x3_pad0_1 = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + filter = variable(shape = [16,8,3,3], label = 'filter'); + bias = variable(shape = [1,8], label = 'bias'); + output = deconv(input, filter, bias, padding = [(0,1), (0,1)]); +} +""" + +sigmoid = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + 
filter = constant(shape = [16,1,1,1], value = [1.0]); + bias = constant(shape = [1,16], value = [0.0]); + conv = conv(input, filter, bias, groups = 0); + output = sigmoid(conv); +} +""" + +argmax_reduce_channel = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = argmax_reduce(input, axes = [1]); +} +""" + +pad_1_1_replicate = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (1,1), (1,1)], border = 'replicate'); +} +""" + +pad_1_0_constant = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [1,16,32,32]); + output = pad(input, padding = [(0,0), (0,0), (1,0), (1,0)], border = 'constant'); +} +""" + +unsqueeze = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16]); + output = unsqueeze(input, axes = [2,3]); +} +""" + +neg_4d = """ +version 1.0; + +graph G( input ) -> ( output ) +{ + input = external(shape = [4,16,32,32]); + output = neg(input); +} +""" + +add_4d = """ +version 1.0; + +graph G( input1, input2 ) -> ( output ) +{ + input1 = external(shape = [4,16,32,32]); + input2 = external(shape = [4,16,32,32]); + output = add(input1, input2); +} +""" diff --git a/tests/python/frontend/nnef/test_forward.py b/tests/python/nightly/frontend/nnef/test_forward.py similarity index 98% rename from tests/python/frontend/nnef/test_forward.py rename to tests/python/nightly/frontend/nnef/test_forward.py index 1e6caceabb470..c07ed20039875 100644 --- a/tests/python/frontend/nnef/test_forward.py +++ b/tests/python/nightly/frontend/nnef/test_forward.py @@ -15,8 +15,6 @@ # specific language governing permissions and limitations # under the License. -import os - import numpy as np import _nnef @@ -27,8 +25,12 @@ import tvm.testing from tvm import relay +import cases_string -graphs_dir = os.path.join("tests", "python", "frontend", "nnef", "cases") +def get_case_graph(name): + if '-' in name: + name = name.replace('-', '_') + return nnef.parse_string(getattr(cases_string, name)) def get_nnef_outputs(path, inputs): @@ -49,14 +51,13 @@ def get_type(val): def verify_model( - model_path, - target, - device, - rtol=1e-5, - atol=1e-5, + model_path, + target, + device, + rtol=1e-5, + atol=1e-5, ): - path = os.path.join(graphs_dir, model_path) - graph = nnef.load_graph(path, load_variables=False) + graph = get_case_graph(model_path) nnef.infer_shapes(graph) inputs = {} # generate inputs @@ -88,7 +89,7 @@ def verify_model( shape = operation.attribs["shape"] assert ( - operation.dtype == "scalar" + operation.dtype == "scalar" ), f"variable of type {operation.dtype} is not supported, please update verify_model" data = np.random.uniform(low=-1.0, size=shape).astype("float32") @@ -724,7 +725,7 @@ def test_cts_le_2d(target, dev): @tvm.testing.parametrize_targets def test_cts_deconv3x3(target, dev): - verify_model("deconv3x3", target, dev, rtol=1e-5, atol=5e-3) + verify_model("deconv3x3", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets @@ -774,7 +775,7 @@ def test_cts_conv2x2(target, dev): @tvm.testing.parametrize_targets def test_cts_conv4x4_stride2x2(target, dev): - verify_model("conv4x4_stride2x2", target, dev, rtol=1e-5, atol=5e-3) + verify_model("conv4x4_stride2x2", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets @@ -979,7 +980,7 @@ def test_cts_mul_2d(target, dev): @tvm.testing.parametrize_targets def test_cts_deconv3x3_pad1_0(target, dev): - 
verify_model("deconv3x3_pad1-0", target, dev, rtol=1e-5, atol=5e-3) + verify_model("deconv3x3_pad1-0", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets @@ -1154,7 +1155,7 @@ def test_cts_slice(target, dev): @tvm.testing.parametrize_targets def test_cts_deconv2x2(target, dev): - verify_model("deconv2x2", target, dev, rtol=1e-5, atol=5e-3) + verify_model("deconv2x2", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets @@ -1214,7 +1215,7 @@ def test_cts_sub_2d(target, dev): @tvm.testing.parametrize_targets def test_cts_deconv3x3_valid(target, dev): - verify_model("deconv3x3_valid", target, dev, rtol=1e-5, atol=5e-3) + verify_model("deconv3x3_valid", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets @@ -1249,7 +1250,7 @@ def test_cts_avg_pool3x3_ignore_border(target, dev): @tvm.testing.parametrize_targets def test_cts_deconv3x3_pad0_0(target, dev): - verify_model("deconv3x3_pad0-0", target, dev, rtol=1e-5, atol=5e-3) + verify_model("deconv3x3_pad0-0", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets @@ -1319,7 +1320,7 @@ def test_cts_batch_norm(target, dev): @tvm.testing.parametrize_targets def test_cts_deconv3x3_stride2x2(target, dev): - verify_model("deconv3x3_stride2x2", target, dev, rtol=1e-5, atol=5e-3) + verify_model("deconv3x3_stride2x2", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets @@ -1589,7 +1590,7 @@ def test_cts_deconv3x3_groups0(target, dev): @tvm.testing.parametrize_targets def test_cts_deconv3x3_pad0_1(target, dev): - verify_model("deconv3x3_pad0-1", target, dev, rtol=1e-5, atol=5e-3) + verify_model("deconv3x3_pad0-1", target, dev, rtol=1e-5, atol=1e-2) @tvm.testing.parametrize_targets diff --git a/tests/python/relax/test_frontend_nnef.py b/tests/python/relax/test_frontend_nnef.py index a699ad91f7067..623780758b8c9 100644 --- a/tests/python/relax/test_frontend_nnef.py +++ b/tests/python/relax/test_frontend_nnef.py @@ -15,71 +15,32 @@ # specific language governing permissions and limitations # under the License. 
-import os - import numpy as np import _nnef import nnef -import nnef_tools.interpreter.pytorch as interpreter + +import tvm import tvm.testing -import tvm.relax as relax +from tvm import relax import tvm.relax.frontend.nnef -import pytest - -graphs_dir = os.path.join("tests", "python", "frontend", "nnef", "cases") - - -def get_nnef_outputs(path, inputs): - ip = interpreter.Interpreter(path, None, None) - inputs = [inputs[tensor.name] for tensor in ip.input_details()] - return ip(inputs) - - -def get_type(val): - if val == "scalar": - return "float32" - if val == "integer": - return "int32" - if val == "logical": - return "bool" - if val == "string": - return "string" - - -def verify_model( - model_path, - target="llvm", - dev=tvm.cpu(0), - rtol=1e-5, - atol=1e-5, -): - path = os.path.join(graphs_dir, model_path) - graph = nnef.load_graph(path, load_variables=False) - nnef.infer_shapes(graph) - inputs = {} - # generate inputs - for inp in graph.inputs: - intensor = graph.tensors[inp] - shape = intensor.shape - if any(exc in model_path for exc in ["log", "sqrt", "pow", "batch_norm"]): - low = 0.0 - else: - low = -1.0 - high = 1.0 - if "acosh" in model_path: - high = 2.0 - low = 1.0 - if intensor.dtype == "scalar": - inputs[inp] = np.random.uniform(low=low, high=high, size=shape).astype("float32") - elif intensor.dtype == "integer": - inputs[inp] = np.random.randint(0, 64, shape) - elif intensor.dtype == "logical": - inputs[inp] = np.random.binomial(1, 0.5, shape).astype("bool") - elif intensor.dtype == "string": - inputs[inp] = np.random.uniform(low=low, high=high, size=shape).astype("string") - - # set graph parameters + +from tvm.script import ir as I +from tvm.script import relax as R +from tvm.script import tir as T +import tvm.topi as topi + +from ..nightly.frontend.nnef import cases_string + + +def get_case_graph(name): + if '-' in name: + name = name.replace('-', '_') + return nnef.parse_string(getattr(cases_string, name)) + + +def verify_model_struct(model_name, binding, expected): + graph = get_case_graph(model_name) for operation in graph.operations: if operation.name == "variable": tensor_name = operation.outputs["output"] @@ -87,1548 +48,2551 @@ def verify_model( shape = operation.attribs["shape"] assert ( - operation.dtype == "scalar" + operation.dtype == "scalar" ), f"variable of type {operation.dtype} is not supported, please update verify_model" - if any(exc in model_path for exc in ["log", "sqrt", "pow", "batch_norm"]): - low = 0.0 - else: - low = -1.0 - data = np.random.uniform(low=low, size=shape).astype("float32") + data = np.ones(shape).astype("float32") tensor = graph.tensors[tensor_name] graph.tensors[tensor_name] = _nnef.Tensor( tensor.name, tensor.dtype, shape, data, tensor.quantization ) - outputs = get_nnef_outputs(graph, inputs) - - mod = tvm.relax.frontend.nnef.from_nnef(graph) - - exec = relax.build(mod, target=target) - vm = relax.VirtualMachine(exec, dev) - inputs = [tvm.nd.array(arr, device=dev) for arr in inputs.values()] - out = vm["main"](*inputs) - - if isinstance(out, tvm.ir.container.Array): - out = [o.numpy() for o in out] - else: - out = out.numpy() - - if not isinstance(out, (list, tuple)): - out = [out] - - for i, base_out in enumerate(outputs): - tvm.testing.assert_allclose(out[i], outputs[base_out], rtol=rtol, atol=atol) - - -@tvm.testing.parametrize_targets -def test_ats_tan_2d(target, dev): - verify_model("tan_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_tan_4d(target, dev): - 
verify_model("tan_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_sinh_2d(target, dev): - verify_model("sinh_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_sinh_4d(target, dev): - verify_model("sinh_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_cosh_2d(target, dev): - verify_model("cosh_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_cosh_4d(target, dev): - verify_model("cosh_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_asin_2d(target, dev): - verify_model("asin_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_asin_4d(target, dev): - verify_model("asin_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_acos_2d(target, dev): - verify_model("acos_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_acos_4d(target, dev): - verify_model("acos_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_atan_2d(target, dev): - verify_model("atan_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_atan_4d(target, dev): - verify_model("atan_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_asinh_2d(target, dev): - verify_model("asinh_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_asinh_4d(target, dev): - verify_model("asinh_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_acosh_2d(target, dev): - verify_model("acosh_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_acosh_4d(target, dev): - verify_model("acosh_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_atanh_2d(target, dev): - verify_model("atanh_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_atanh_4d(target, dev): - verify_model("atanh_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_clamp_2d(target, dev): - verify_model("clamp_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_clamp_4d(target, dev): - verify_model("clamp_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_clamp_4d_constant(target, dev): - verify_model("clamp_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_reshape_partial(target, dev): - verify_model("reshape_partial", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_split_unbalanced(target, dev): - verify_model("split_unbalanced", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_stack(target, dev): - verify_model("stack", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_unstack(target, dev): - verify_model("unstack", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_slice_strides(target, dev): - verify_model("slice_strides", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_matmul_2d(target, dev): - verify_model("matmul_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets 
-def test_ats_matmul_2d_transpose(target, dev): - verify_model("matmul_2d_transpose", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_matmul_4d(target, dev): - verify_model("matmul_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_matmul_4d_transpose(target, dev): - verify_model("matmul_4d_transpose", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_prelu(target, dev): - verify_model("prelu", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_prelu_2d_standalone(target, dev): - verify_model("prelu_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_prelu_4d_standalone(target, dev): - verify_model("prelu_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_leaky_relu(target, dev): - verify_model("leaky_relu", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_leaky_relu_2d_standalone(target, dev): - verify_model("leaky_relu_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_leaky_relu_4d_standalone(target, dev): - verify_model("leaky_relu_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_elu(target, dev): - verify_model("elu", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_elu_2d_standalone(target, dev): - verify_model("elu_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_elu_4d_standalone(target, dev): - verify_model("elu_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_selu(target, dev): - verify_model("selu", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_selu_2d_standalone(target, dev): - verify_model("selu_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_selu_4d_standalone(target, dev): - verify_model("selu_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_gelu(target, dev): - verify_model("gelu", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_gelu_2d_standalone(target, dev): - verify_model("gelu_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_gelu_4d_standalone(target, dev): - verify_model("gelu_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_silu(target, dev): - verify_model("silu", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_silu_2d_standalone(target, dev): - verify_model("silu_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_silu_4d_standalone(target, dev): - verify_model("silu_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_softplus(target, dev): - verify_model("softplus", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_softplus_2d_standalone(target, dev): - verify_model("softplus_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_softplus_4d_standalone(target, dev): - verify_model("softplus_4d_standalone", target, 
dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_separable_conv3x3(target, dev): - verify_model("separable_conv3x3", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_separable_conv3x3_with_attrs(target, dev): - verify_model("separable_conv3x3_with_attrs", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_separable_conv5x5(target, dev): - verify_model("separable_conv5x5", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_separable_deconv3x3(target, dev): - verify_model("separable_deconv3x3", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_ats_separable_deconv3x3_with_attrs(target, dev): - verify_model("separable_deconv3x3_with_attrs", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_ats_separable_deconv5x5(target, dev): - verify_model("separable_deconv5x5", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_ats_rms_pool3x3(target, dev): - verify_model("rms_pool3x3", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_local_response_normalization(target, dev): - verify_model("local_response_normalization", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_local_mean_normalization(target, dev): - verify_model("local_mean_normalization", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_local_variance_normalization(target, dev): - verify_model("local_variance_normalization", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_local_contrast_normalization(target, dev): - verify_model("local_contrast_normalization", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_l1_normalization(target, dev): - verify_model("l1_normalization", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_ats_l2_normalization(target, dev): - verify_model("l2_normalization", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_pad_0_1_reflect(target, dev): - verify_model("pad_0-1_reflect", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_pad_1_0_reflect(target, dev): - verify_model("pad_1-0_reflect", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_pad_1_1_reflect(target, dev): - verify_model("pad_1-1_reflect", target, dev, rtol=1e-5, atol=1e-5) - - -# GENERATED CASES START - - -@tvm.testing.parametrize_targets -def test_cts_gt_2d(target, dev): - verify_model("gt_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_2d(target, dev): - verify_model("max_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_mean_reduce_spatial(target, dev): - verify_model("mean_reduce_spatial", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_select_4d(target, dev): - verify_model("select_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_pool3x3_pad1_0(target, dev): - verify_model("max_pool3x3_pad1-0", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_relu(target, dev): - verify_model("relu", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def 
test_cts_split_channel(target, dev): - verify_model("split_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_rcp_4d(target, dev): - verify_model("rcp_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_pool2x2(target, dev): - verify_model("max_pool2x2", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_avg_pool2x2(target, dev): - verify_model("avg_pool2x2", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_rcp_2d(target, dev): - verify_model("rcp_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_log2_4d(target, dev): - verify_model("log2_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3_stride2x2(target, dev): - verify_model("conv3x3_stride2x2", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_lt_4d_constant(target, dev): - verify_model("lt_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_or_4d(target, dev): - verify_model("or_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_deconv7x7(target, dev): - verify_model("deconv7x7", target, dev, rtol=1e-5, atol=1e-4) - - -@tvm.testing.parametrize_targets -def test_cts_nearest_upsample(target, dev): - verify_model("nearest_upsample", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_ceil_4d(target, dev): - verify_model("ceil_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_floor_2d(target, dev): - verify_model("floor_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_avg_pool1x1(target, dev): - verify_model("avg_pool1x1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_log_4d(target, dev): - verify_model("log_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sum_reduce_channel(target, dev): - verify_model("sum_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_min_reduce_spatial(target, dev): - verify_model("min_reduce_spatial", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_4d_broadcast(target, dev): - verify_model("max_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_pool3x3_pad0_1(target, dev): - verify_model("max_pool3x3_pad0-1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_cos_2d(target, dev): - verify_model("cos_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_not_4d(target, dev): - verify_model("not_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sub_4d(target, dev): - verify_model("sub_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_bilinear_upsample_aligned_replicate(target, dev): - verify_model("bilinear_upsample_aligned_replicate", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_log_2d(target, dev): - verify_model("log_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_argmin_reduce_spatial(target, dev): - verify_model("argmin_reduce_spatial", target, dev, 
rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_select_2d(target, dev): - verify_model("select_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_ne_4d(target, dev): - verify_model("ne_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_or_2d(target, dev): - verify_model("or_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_eq_2d(target, dev): - verify_model("eq_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_rsqr_2d(target, dev): - verify_model("rsqr_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_eq_4d(target, dev): - verify_model("eq_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_deconv7x7_stride4x4(target, dev): - verify_model("deconv7x7_stride4x4", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_cts_max_pool3x3(target, dev): - verify_model("max_pool3x3", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_and_4d(target, dev): - verify_model("and_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_mul_4d(target, dev): - verify_model("mul_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_softmax(target, dev): - verify_model("softmax", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sign_4d(target, dev): - verify_model("sign_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_mul_4d_constant(target, dev): - verify_model("mul_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_le_4d_constant(target, dev): - verify_model("le_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_box2x2(target, dev): - verify_model("box2x2", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_or_4d_broadcast(target, dev): - verify_model("or_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_deconv5x5(target, dev): - verify_model("deconv5x5", target, dev, rtol=1e-5, atol=1e-4) - - -@tvm.testing.parametrize_targets -def test_cts_box3x3_pad1_0(target, dev): - verify_model("box3x3_pad1-0", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_debox3x3_pad1_0(target, dev): - verify_model("debox3x3_pad1-0", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_ge_4d_broadcast(target, dev): - verify_model("ge_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_linear_reshape(target, dev): - verify_model("linear_reshape", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_le_2d(target, dev): - verify_model("le_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_deconv3x3(target, dev): - verify_model("deconv3x3", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_cts_nearest_downsample(target, dev): - verify_model("nearest_downsample", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_select_4d_true(target, dev): - verify_model("select_4d_true", target, dev, rtol=1e-5, atol=1e-5) 
- - -@tvm.testing.parametrize_targets -def test_cts_min_4d_broadcast(target, dev): - verify_model("min_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_4d(target, dev): - verify_model("max_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_4d_constant(target, dev): - verify_model("max_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sum_reduce_spatial(target, dev): - verify_model("sum_reduce_spatial", target, dev, rtol=1e-5, atol=1e-4) - - -@tvm.testing.parametrize_targets -def test_cts_min_2d(target, dev): - verify_model("min_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_ge_2d(target, dev): - verify_model("ge_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv2x2(target, dev): - verify_model("conv2x2", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv4x4_stride2x2(target, dev): - verify_model("conv4x4_stride2x2", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_cts_debox1x1(target, dev): - verify_model("debox1x1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_reshape_flatten(target, dev): - verify_model("reshape_flatten", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3_nobias(target, dev): - verify_model("conv3x3_nobias", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_tile_spatial(target, dev): - verify_model("tile_spatial", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_softmax_4d_standalone(target, dev): - verify_model("softmax_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_rsqrt_4d(target, dev): - verify_model("rsqrt_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_concat_channel(target, dev): - verify_model("concat_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_area_downsample(target, dev): - verify_model("area_downsample", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_pool3x3_pad1_1(target, dev): - verify_model("max_pool3x3_pad1-1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sigmoid_2d_standalone(target, dev): - verify_model("sigmoid_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_ne_4d_constant(target, dev): - verify_model("ne_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3(target, dev): - verify_model("conv3x3", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_all_reduce_channel(target, dev): - verify_model("all_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_squeeze_spatial(target, dev): - verify_model("squeeze_spatial", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_and_4d_constant(target, dev): - verify_model("and_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_pool3x3_constant_border(target, dev): - verify_model("max_pool3x3_constant-border", target, dev, 
rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_argmax_reduce_spatial(target, dev): - verify_model("argmax_reduce_spatial", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_cos_4d(target, dev): - verify_model("cos_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sqr_4d(target, dev): - verify_model("sqr_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_rsqrt_2d(target, dev): - verify_model("rsqrt_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_bilinear_upsample_symmetric_replicate(target, dev): - verify_model("bilinear_upsample_symmetric_replicate", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_tile_channel(target, dev): - verify_model("tile_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_div_4d(target, dev): - verify_model("div_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sqrt_2d(target, dev): - verify_model("sqrt_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_and_4d_broadcast(target, dev): - verify_model("and_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_transpose_nhwc_to_nchw(target, dev): - verify_model("transpose_nhwc_to_nchw", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_avg_pool3x3_pad0_1(target, dev): - verify_model("avg_pool3x3_pad0-1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_round_2d(target, dev): - verify_model("round_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_box3x3_pad0_1(target, dev): - verify_model("box3x3_pad0-1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_deconv6x6(target, dev): - verify_model("deconv6x6", target, dev, rtol=1e-5, atol=1e-4) - - -@tvm.testing.parametrize_targets -def test_cts_add_4d_constant(target, dev): - verify_model("add_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_lt_2d(target, dev): - verify_model("lt_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_min_4d(target, dev): - verify_model("min_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_box3x3_stride1x1(target, dev): - verify_model("box3x3_stride1x1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_linear_nobias(target, dev): - verify_model("linear_nobias", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_div_2d(target, dev): - verify_model("div_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_avg_pool3x3_stride1x1(target, dev): - verify_model("avg_pool3x3_stride1x1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv7x7(target, dev): - verify_model("conv7x7", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3_groups0(target, dev): - verify_model("conv3x3_groups0", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_mul_2d(target, dev): - verify_model("mul_2d", target, dev, rtol=1e-5, atol=1e-5) - - 
-@tvm.testing.parametrize_targets -def test_cts_deconv3x3_pad1_0(target, dev): - verify_model("deconv3x3_pad1-0", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_cts_ne_2d(target, dev): - verify_model("ne_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_avg_pool3x3_pad1_1(target, dev): - verify_model("avg_pool3x3_pad1-1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_mean_reduce_channel(target, dev): - verify_model("mean_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv5x5(target, dev): - verify_model("conv5x5", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_cts_max_pool3x3_stride1x1(target, dev): - verify_model("max_pool3x3_stride1x1", target, dev, rtol=1e-5, atol=1e-5) - - -@pytest.mark.skip(reason="Replicate - Edge mode is currently not supported in TVM relax") -@tvm.testing.parametrize_targets -def test_cts_pad_1_0_replicate(target, dev): - verify_model("pad_1-0_replicate", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_debox3x3_pad1_1(target, dev): - verify_model("debox3x3_pad1-1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_avg_pool3x3_pad1_0(target, dev): - verify_model("avg_pool3x3_pad1-0", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_bilinear_upsample_symmetric_constant(target, dev): - verify_model("bilinear_upsample_symmetric_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_gt_4d_broadcast(target, dev): - verify_model("gt_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_tanh_4d_standalone(target, dev): - verify_model("tanh_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_add_2d(target, dev): - verify_model("add_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_rsqr_4d(target, dev): - verify_model("rsqr_4d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_div_4d_broadcast(target, dev): - verify_model("div_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_eq_4d_broadcast(target, dev): - verify_model("eq_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3_valid(target, dev): - verify_model("conv3x3_valid", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_min_4d_constant(target, dev): - verify_model("min_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_or_4d_constant(target, dev): - verify_model("or_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_min_reduce_channel(target, dev): - verify_model("min_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_reduce_spatial(target, dev): - verify_model("max_reduce_spatial", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_bilinear_upsample_asymmetric_constant(target, dev): - verify_model("bilinear_upsample_asymmetric_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3_pad0_0(target, 
dev): - verify_model("conv3x3_pad0-0", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3_pad1_0(target, dev): - verify_model("conv3x3_pad1-0", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_abs_2d(target, dev): - verify_model("abs_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_max_reduce_channel(target, dev): - verify_model("max_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_ge_4d_constant(target, dev): - verify_model("ge_4d_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_transpose_nchw_to_nhwc(target, dev): - verify_model("transpose_nchw_to_nhwc", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_deconv3x3_pad1_1(target, dev): - verify_model("deconv3x3_pad1-1", target, dev, rtol=1e-5, atol=1e-2) - - -@tvm.testing.parametrize_targets -def test_cts_ne_4d_broadcast(target, dev): - verify_model("ne_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_sqr_2d(target, dev): - verify_model("sqr_2d", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_conv3x3_pad1_1(target, dev): - verify_model("conv3x3_pad1-1", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_bilinear_upsample_aligned_constant(target, dev): - verify_model("bilinear_upsample_aligned_constant", target, dev, rtol=1e-5, atol=1e-5) - - -@tvm.testing.parametrize_targets -def test_cts_log2_2d(target, dev): - verify_model("log2_2d", target, dev, rtol=1e-5, atol=1e-5) - + binding = {k: tvm.nd.array(v) for k, v in binding.items()} + expected = relax.transform.BindParams("main", binding)(expected) + + mod = relax.frontend.nnef.from_nnef(graph) + tvm.ir.assert_structural_equal(mod, expected) + + +def get_unary_mod(method, dt="float32", o_dtype=None): + global dtype + dtype = dt + if not o_dtype: + o_dtype = dtype + + def _appl_shape(sh, osh=None): + global shape, o_shape + if not osh: + osh = sh + shape, o_shape = sh, osh + + @tvm.script.ir.ir_module + class expected: + @R.function + def main( + in1: R.Tensor(shape, dtype) + ) -> R.Tensor(o_shape, o_dtype): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor(o_shape, dtype=o_dtype) = method(in1) + R.output(lv) + return lv -@tvm.testing.parametrize_targets -def test_cts_slice(target, dev): - verify_model("slice", target, dev, rtol=1e-5, atol=1e-5) + return expected + + return _appl_shape -@tvm.testing.parametrize_targets -def test_cts_deconv2x2(target, dev): - verify_model("deconv2x2", target, dev, rtol=1e-5, atol=1e-2) +def get_binary_mod(method, dt="float32", o_dtype=None): + global dtype + dtype = dt + if not o_dtype: + o_dtype = dtype + def _appl_shape(sh1, sh2=None, osh=None): + global shape1, shape2, o_shape + if not sh2: + sh2 = sh1 + if not osh: + osh = sh1 + shape1, shape2, o_shape = sh1, sh2, osh -@tvm.testing.parametrize_targets -def test_cts_all_reduce_spatial(target, dev): - verify_model("all_reduce_spatial", target, dev, rtol=1e-5, atol=1e-5) + @tvm.script.ir.ir_module + class expected: + @R.function + def main( + lhs: R.Tensor(shape1, dtype), + rhs: R.Tensor(shape2, dtype) + ) -> R.Tensor(o_shape, o_dtype): + R.func_attr({"num_input": 2}) + with R.dataflow(): + lv: R.Tensor(o_shape, dtype=o_dtype) = method(lhs, rhs) + R.output(lv) + return lv + return expected 
-@tvm.testing.parametrize_targets -def test_cts_sqrt_4d(target, dev): - verify_model("sqrt_4d", target, dev, rtol=1e-5, atol=1e-5) + return _appl_shape -@tvm.testing.parametrize_targets -def test_cts_conv7x7_stride4x4(target, dev): - verify_model("conv7x7_stride4x4", target, dev, rtol=1e-5, atol=1e-2) +# graph tests +def test_copy(): + @I.ir_module + class expected_2d: + @R.function + def main(input: R.Tensor((4, 16), dtype="float32")) -> R.Tensor((4, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.identity, input) + gv: R.Tensor((4, 16), dtype="float32") = lv + R.output(gv) + return gv + verify_model_struct("copy_2d", {}, expected_2d) -@tvm.testing.parametrize_targets -def test_cts_ge_4d(target, dev): - verify_model("ge_4d", target, dev, rtol=1e-5, atol=1e-5) + @I.ir_module + class expected_4d: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.identity, input) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = lv + R.output(gv) + return gv + verify_model_struct("copy_4d", {}, expected_4d) -@tvm.testing.parametrize_targets -def test_cts_any_reduce_channel(target, dev): - verify_model("any_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) +def test_neg(): + expected = get_unary_mod(R.negative) -@tvm.testing.parametrize_targets -def test_cts_and_2d(target, dev): - verify_model("and_2d", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16) + verify_model_struct("neg_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("neg_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_add_4d_broadcast(target, dev): - verify_model("add_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) +def test_rcp(): + def method(in1): + return R.divide(R.const(1, "float32"), in1) -@tvm.testing.parametrize_targets -def test_cts_copy_2d(target, dev): - verify_model("copy_2d", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method) + shape = (4, 16) + verify_model_struct("rcp_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("rcp_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_ceil_2d(target, dev): - verify_model("ceil_2d", target, dev, rtol=1e-5, atol=1e-5) +def test_exp(): + expected = get_unary_mod(R.exp) + shape1 = (4, 16) + verify_model_struct("exp_2d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_linear_squeeze(target, dev): - verify_model("linear_squeeze", target, dev, rtol=1e-5, atol=1e-5) + shape1 = (4, 16, 32, 32) + verify_model_struct("exp_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_sub_2d(target, dev): - verify_model("sub_2d", target, dev, rtol=1e-5, atol=1e-5) +def test_log(): + expected = get_unary_mod(R.log) + shape = (4, 16) + verify_model_struct("log_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("log_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_deconv3x3_valid(target, dev): - verify_model("deconv3x3_valid", target, dev, rtol=1e-5, atol=1e-2) +def test_sin(): + expected = get_unary_mod(R.sin) -@tvm.testing.parametrize_targets -def test_cts_pow_4d(target, dev): - verify_model("pow_4d", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16) + verify_model_struct("sin_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("sin_4d", {}, expected(shape)) 
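# A minimal sketch of the flow behind verify_model_struct(case_name, binding, expected), which the
# structural tests in this file rely on. Only the lines marked "from this patch" appear verbatim in
# the helper's visible tail; the case-loading step (get_case_graph) and the exact argument order are
# assumptions, since the helper's full body is defined earlier in the file and is not reproduced here.
# The sketch also assumes the file-level imports (tvm, relax, np) already present in this test module.
def verify_model_struct_sketch(name, binding, expected):
    graph = get_case_graph(name)                                       # assumed: load the NNEF test case by name
    binding = {k: tvm.nd.array(v) for k, v in binding.items()}         # from this patch: wrap numpy weights as NDArrays
    expected = relax.transform.BindParams("main", binding)(expected)   # from this patch: fold weights into the expected module
    mod = relax.frontend.nnef.from_nnef(graph)                         # from this patch: import the NNEF graph to Relax
    tvm.ir.assert_structural_equal(mod, expected)                      # from this patch: structural, not numerical, comparison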
-@tvm.testing.parametrize_targets -def test_cts_pad_1_1_constant(target, dev): - verify_model("pad_1-1_constant", target, dev, rtol=1e-5, atol=1e-5) +def test_cos(): + expected = get_unary_mod(R.cos) -@tvm.testing.parametrize_targets -def test_cts_debox3x3(target, dev): - verify_model("debox3x3", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16) + verify_model_struct("cos_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("cos_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_conv1x1(target, dev): - verify_model("conv1x1", target, dev, rtol=1e-5, atol=1e-5) +def test_tan(): + expected = get_unary_mod(R.tan) -@tvm.testing.parametrize_targets -def test_cts_exp_4d(target, dev): - verify_model("exp_4d", target, dev, rtol=1e-5, atol=1e-5) + # 2D + shape = (4, 16) + verify_model_struct("tan_2d", {}, expected(shape)) + # 4D + shape = (4, 16, 32, 32) + verify_model_struct("tan_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_avg_pool3x3_ignore_border(target, dev): - verify_model("avg_pool3x3_ignore-border", target, dev, rtol=1e-5, atol=1e-5) +def test_sinh(): + expected = get_unary_mod(R.sinh) -@tvm.testing.parametrize_targets -def test_cts_deconv3x3_pad0_0(target, dev): - verify_model("deconv3x3_pad0-0", target, dev, rtol=1e-5, atol=1e-2) + shape = (4, 16) + verify_model_struct("sinh_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("sinh_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_pow_4d_broadcast(target, dev): - verify_model("pow_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) +def test_cosh(): + expected = get_unary_mod(R.cosh) -@tvm.testing.parametrize_targets -def test_cts_abs_4d(target, dev): - verify_model("abs_4d", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16) + verify_model_struct("cosh_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("cosh_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_sin_4d(target, dev): - verify_model("sin_4d", target, dev, rtol=1e-5, atol=1e-5) +def test_tanh(): + expected = get_unary_mod(R.tanh) + shape = (4, 16) + verify_model_struct("tanh_2d_standalone", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_select_2d_true(target, dev): - verify_model("select_2d_true", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16, 32, 32) + verify_model_struct("tanh_4d_standalone", {}, expected(shape)) + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), + w1: R.Tensor((4, 1, 1, 1), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.tanh(lv) + R.output(gv) + return gv -@tvm.testing.parametrize_targets -def test_cts_relu_2d_standalone(target, dev): - verify_model("relu_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("tanh", binding, expected) -@tvm.testing.parametrize_targets -def test_cts_reshape_squeeze(target, dev): - verify_model("reshape_squeeze", target, dev, rtol=1e-5, atol=1e-5) +def test_asin(): + expected = get_unary_mod(R.asin) + shape = (4, 16) + verify_model_struct("asin_2d", {}, 
expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_sub_4d_constant(target, dev): - verify_model("sub_4d_constant", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16, 32, 32) + verify_model_struct("asin_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_linear(target, dev): - verify_model("linear", target, dev, rtol=1e-5, atol=1e-5) +def test_acos(): + expected = get_unary_mod(R.acos) + shape = (4, 16) + verify_model_struct("acos_2d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_pow_2d(target, dev): - verify_model("pow_2d", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16, 32, 32) + verify_model_struct("acos_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_debox3x3_pad0_1(target, dev): - verify_model("debox3x3_pad0-1", target, dev, rtol=1e-5, atol=1e-5) +def test_atan(): + expected = get_unary_mod(R.atan) + shape = (4, 16) + verify_model_struct("atan_2d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_floor_4d(target, dev): - verify_model("floor_4d", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16, 32, 32) + verify_model_struct("atan_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_deconv3x3_nobias(target, dev): - verify_model("deconv3x3_nobias", target, dev, rtol=1e-5, atol=1e-2) +def test_asinh(): + expected = get_unary_mod(R.asinh) + shape = (4, 16) + verify_model_struct("asinh_2d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_batch_norm(target, dev): - verify_model("batch_norm", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16, 32, 32) + verify_model_struct("asinh_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_deconv3x3_stride2x2(target, dev): - verify_model("deconv3x3_stride2x2", target, dev, rtol=1e-5, atol=1e-2) +def test_acosh(): + expected = get_unary_mod(R.acosh) + shape = (4, 16) + verify_model_struct("acosh_2d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_debox2x2(target, dev): - verify_model("debox2x2", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16, 32, 32) + verify_model_struct("acosh_4d", {}, expected(shape)) -@pytest.mark.skip(reason="Replicate - Edge mode is currently not supported in TVM relax") -@tvm.testing.parametrize_targets -def test_cts_pad_0_1_replicate(target, dev): - verify_model("pad_0-1_replicate", target, dev, rtol=1e-5, atol=1e-5) +def test_atanh(): + expected = get_unary_mod(R.atanh) + shape = (4, 16) + verify_model_struct("atanh_2d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_mul_4d_broadcast(target, dev): - verify_model("mul_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16, 32, 32) + verify_model_struct("atanh_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_debox3x3_pad0_0(target, dev): - verify_model("debox3x3_pad0-0", target, dev, rtol=1e-5, atol=1e-5) +def test_abs(): + expected = get_unary_mod(R.abs) + shape = (4, 16) + verify_model_struct("abs_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("abs_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_argmin_reduce_channel(target, dev): - verify_model("argmin_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) +def test_sign(): + expected = get_unary_mod(R.sign) + shape1 = (4, 16) + verify_model_struct("sign_2d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_copy_4d(target, dev): - verify_model("copy_4d", target, dev, rtol=1e-5, atol=1e-5) + 
shape1 = (4, 16, 32, 32) + verify_model_struct("sign_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_not_2d(target, dev): - verify_model("not_2d", target, dev, rtol=1e-5, atol=1e-5) +def test_not(): + expected = get_unary_mod(R.logical_not, dt="bool") + shape1 = (4, 16) + verify_model_struct("not_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("not_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_sigmoid_4d_standalone(target, dev): - verify_model("sigmoid_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) +def test_floor(): + expected = get_unary_mod(R.floor) -@tvm.testing.parametrize_targets -def test_cts_exp_2d(target, dev): - verify_model("exp_2d", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16) + verify_model_struct("floor_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("floor_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_lt_4d(target, dev): - verify_model("lt_4d", target, dev, rtol=1e-5, atol=1e-5) +def test_ceil(): + expected = get_unary_mod(R.ceil) + shape = (4, 16) + verify_model_struct("ceil_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("ceil_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_conv4x4(target, dev): - verify_model("conv4x4", target, dev, rtol=1e-5, atol=1e-2) +def test_round(): + expected = get_unary_mod(R.round) -@tvm.testing.parametrize_targets -def test_cts_avg_pool3x3(target, dev): - verify_model("avg_pool3x3", target, dev, rtol=1e-5, atol=1e-5) + shape = (4, 16) + verify_model_struct("round_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("round_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_avg_pool3x3_pad0_0(target, dev): - verify_model("avg_pool3x3_pad0-0", target, dev, rtol=1e-5, atol=1e-5) +def test_add(): + expected = get_binary_mod(R.add) + shape1 = (4, 16) + verify_model_struct("add_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("add_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_conv3x3_pad0_1(target, dev): - verify_model("conv3x3_pad0-1", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("add_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.add(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_pad_0_1_constant(target, dev): - verify_model("pad_0-1_constant", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method) + shape1 = (4, 16, 32, 32) + verify_model_struct("add_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_deconv4x4(target, dev): - verify_model("deconv4x4", target, dev, rtol=1e-5, atol=1e-5) +def test_sub(): + expected = get_binary_mod(R.subtract) + shape1 = (4, 16) + verify_model_struct("sub_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("sub_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_neg_2d(target, dev): - verify_model("neg_2d", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("sub_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.subtract(in1, R.const(0.5, "float32")) -@pytest.mark.skip(reason="Replicate - Edge mode is currently not supported in TVM relax") -@tvm.testing.parametrize_targets -def 
test_cts_bilinear_upsample_asymmetric_replicate(target, dev): - verify_model("bilinear_upsample_asymmetric_replicate", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method) + shape1 = (4, 16, 32, 32) + verify_model_struct("sub_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_conv5x5_stride3x3(target, dev): - verify_model("conv5x5_stride3x3", target, dev, rtol=1e-5, atol=1e-2) +def test_mul(): + expected = get_binary_mod(R.multiply) + shape1 = (4, 16) + verify_model_struct("mul_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("mul_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_relu_4d_standalone(target, dev): - verify_model("relu_4d_standalone", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("mul_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.multiply(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_max_pool1x1(target, dev): - verify_model("max_pool1x1", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method) + shape1 = (4, 16, 32, 32) + verify_model_struct("mul_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_deconv5x5_pad2_2(target, dev): - verify_model("deconv5x5_pad2-2", target, dev, rtol=1e-5, atol=1e-4) +def test_div(): + expected = get_binary_mod(R.divide) + shape1 = (4, 16) + verify_model_struct("div_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("div_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_tile_batch(target, dev): - verify_model("tile_batch", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("div_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.divide(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_eq_4d_constant(target, dev): - verify_model("eq_4d_constant", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method) + shape1 = (4, 16, 32, 32) + verify_model_struct("div_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_lt_4d_broadcast(target, dev): - verify_model("lt_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) +def test_pow(): + expected = get_binary_mod(R.power) + shape1 = (4, 16) + verify_model_struct("pow_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("pow_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_deconv1x1(target, dev): - verify_model("deconv1x1", target, dev, rtol=1e-5, atol=2e-3) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("pow_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.power(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_sign_2d(target, dev): - verify_model("sign_2d", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method) + shape1 = (4, 16, 32, 32) + verify_model_struct("pow_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_select_2d_false(target, dev): - verify_model("select_2d_false", target, dev, rtol=1e-5, atol=1e-5) +def test_lt(): + expected = get_binary_mod(R.less, o_dtype="bool") + shape1 = (4, 16) + verify_model_struct("lt_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("lt_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def 
test_cts_div_4d_constant(target, dev): - verify_model("div_4d_constant", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("lt_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.less(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_pow_4d_constant(target, dev): - verify_model("pow_4d_constant", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, o_dtype="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("lt_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_round_4d(target, dev): - verify_model("round_4d", target, dev, rtol=1e-5, atol=1e-5) +def test_gt(): + expected = get_binary_mod(R.greater, o_dtype="bool") + shape1 = (4, 16) + verify_model_struct("gt_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("gt_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_debox3x3_stride1x1(target, dev): - verify_model("debox3x3_stride1x1", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("gt_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.greater(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_deconv5x5_stride3x3(target, dev): - verify_model("deconv5x5_stride3x3", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, o_dtype="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("gt_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_sub_4d_broadcast(target, dev): - verify_model("sub_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) +def test_le(): + expected = get_binary_mod(R.less_equal, o_dtype="bool") + shape1 = (4, 16) + verify_model_struct("le_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("le_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_any_reduce_spatial(target, dev): - verify_model("any_reduce_spatial", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("le_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.less_equal(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_gt_4d_constant(target, dev): - verify_model("gt_4d_constant", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, o_dtype="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("le_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_conv6x6(target, dev): - verify_model("conv6x6", target, dev, rtol=1e-5, atol=1e-2) +def test_ge(): + expected = get_binary_mod(R.greater_equal, o_dtype="bool") + shape1 = (4, 16) + verify_model_struct("ge_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("ge_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_le_4d(target, dev): - verify_model("le_4d", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("ge_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.greater_equal(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_gt_4d(target, dev): - verify_model("gt_4d", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, o_dtype="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("ge_4d_constant", {}, expected(shape1)) 
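# For readers skimming the comparison-op tests above: a hand expansion of what the factory call
# get_binary_mod(R.less, o_dtype="bool") yields for shape (4, 16), i.e. the module that
# verify_model_struct("lt_2d", {}, ...) is compared against. It mirrors the _appl_shape body of
# get_binary_mod defined earlier in this file; the class name here is illustrative only, and the
# snippet assumes this module's existing `R` / tvm.script imports.
@tvm.script.ir.ir_module
class expected_lt_2d_example:
    @R.function
    def main(
        lhs: R.Tensor((4, 16), dtype="float32"),
        rhs: R.Tensor((4, 16), dtype="float32"),
    ) -> R.Tensor((4, 16), dtype="bool"):
        R.func_attr({"num_input": 2})
        with R.dataflow():
            # the comparison runs on float32 operands and produces a bool tensor, hence o_dtype="bool"
            lv: R.Tensor((4, 16), dtype="bool") = R.less(lhs, rhs)
            R.output(lv)
        return lv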
-@tvm.testing.parametrize_targets -def test_cts_deconv4x4_stride2x2(target, dev): - verify_model("deconv4x4_stride2x2", target, dev, rtol=1e-5, atol=1e-5) +def test_eq(): + expected = get_binary_mod(R.equal, o_dtype="bool") + shape1 = (4, 16) + verify_model_struct("eq_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("eq_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_le_4d_broadcast(target, dev): - verify_model("le_4d_broadcast", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("eq_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.equal(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_tanh_2d_standalone(target, dev): - verify_model("tanh_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, o_dtype="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("eq_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_box3x3(target, dev): - verify_model("box3x3", target, dev, rtol=1e-5, atol=1e-5) +def test_ne(): + expected = get_binary_mod(R.not_equal, o_dtype="bool") + shape1 = (4, 16) + verify_model_struct("ne_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("ne_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_select_4d_false(target, dev): - verify_model("select_4d_false", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("ne_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.not_equal(in1, R.const(0.5, "float32")) -@tvm.testing.parametrize_targets -def test_cts_tanh(target, dev): - verify_model("tanh", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, o_dtype="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("ne_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_sin_2d(target, dev): - verify_model("sin_2d", target, dev, rtol=1e-5, atol=1e-5) +def test_and(): + expected = get_binary_mod(R.logical_and, dt="bool") + shape1 = (4, 16) + verify_model_struct("and_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("and_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_box3x3_pad0_0(target, dev): - verify_model("box3x3_pad0-0", target, dev, rtol=1e-5, atol=1e-5) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("and_4d_broadcast", {}, expected(shape1, shape2)) + def method(in1): + return R.logical_and(in1, R.const(False, "bool")) -@tvm.testing.parametrize_targets -def test_cts_box1x1(target, dev): - verify_model("box1x1", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, dt="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("and_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_box3x3_pad1_1(target, dev): - verify_model("box3x3_pad1-1", target, dev, rtol=1e-5, atol=1e-5) +def test_or(): + expected = get_binary_mod(R.logical_or, dt="bool") + shape1 = (4, 16) + verify_model_struct("or_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("or_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_conv5x5_pad2_2(target, dev): - verify_model("conv5x5_pad2-2", target, dev, rtol=1e-5, atol=1e-2) + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("or_4d_broadcast", {}, expected(shape1, shape2)) + def 
method(in1): + return R.logical_or(in1, R.const(False, "bool")) -@tvm.testing.parametrize_targets -def test_cts_max_pool3x3_pad0_0(target, dev): - verify_model("max_pool3x3_pad0-0", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method, dt="bool") + shape1 = (4, 16, 32, 32) + verify_model_struct("or_4d_constant", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_softmax_2d_standalone(target, dev): - verify_model("softmax_2d_standalone", target, dev, rtol=1e-5, atol=1e-5) +def test_select(): + @I.ir_module + class expected: + @R.function + def main(cond: R.Tensor((4, 16, 32, 32), dtype="bool"), input1: R.Tensor((4, 16, 32, 32), dtype="float32"), + input2: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 3}) + with R.dataflow(): + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.where(cond, input1, input2) + R.output(gv) + return gv + verify_model_struct("select_4d", {}, expected) -@tvm.testing.parametrize_targets -def test_cts_deconv3x3_groups0(target, dev): - verify_model("deconv3x3_groups0", target, dev, rtol=1e-5, atol=1e-5) + def get_custom_mod(tf): + @I.ir_module + class expected: + @R.function + def main(input1: R.Tensor((4, 16, 32, 32), dtype="float32"), + input2: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 2}) + with R.dataflow(): + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.where(R.const(tf, "bool"), input1, input2) + R.output(gv) + return gv + return expected -@tvm.testing.parametrize_targets -def test_cts_deconv3x3_pad0_1(target, dev): - verify_model("deconv3x3_pad0-1", target, dev, rtol=1e-5, atol=1e-2) + verify_model_struct("select_4d_true", {}, get_custom_mod(True)) + verify_model_struct("select_4d_false", {}, get_custom_mod(False)) -@tvm.testing.parametrize_targets -def test_cts_sigmoid(target, dev): - verify_model("sigmoid", target, dev, rtol=1e-5, atol=1e-5) +def test_sqr(): + def method(in1): + return R.power(in1, R.const(2, "float32")) + expected = get_unary_mod(method) + shape = (4, 16) + verify_model_struct("sqr_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("sqr_4d", {}, expected(shape)) -@tvm.testing.parametrize_targets -def test_cts_argmax_reduce_channel(target, dev): - verify_model("argmax_reduce_channel", target, dev, rtol=1e-5, atol=1e-5) +def test_sqrt(): + expected = get_unary_mod(R.sqrt) + shape = (4, 16) + verify_model_struct("sqrt_2d", {}, expected(shape)) + shape = (4, 16, 32, 32) + verify_model_struct("sqrt_4d", {}, expected(shape)) -@pytest.mark.skip(reason="Replicate - Edge mode is currently not supported in TVM relax") -@tvm.testing.parametrize_targets -def test_cts_pad_1_1_replicate(target, dev): - verify_model("pad_1-1_replicate", target, dev, rtol=1e-5, atol=1e-5) +def test_rsqr(): + def method(in1): + return R.power(in1, R.const(-2, "float32")) -@tvm.testing.parametrize_targets -def test_cts_pad_1_0_constant(target, dev): - verify_model("pad_1-0_constant", target, dev, rtol=1e-5, atol=1e-5) + expected = get_unary_mod(method) + shape1 = (4, 16) + verify_model_struct("rsqr_2d", {}, expected(shape1)) + shape1 = (4, 16, 32, 32) + verify_model_struct("rsqr_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_unsqueeze(target, dev): - verify_model("unsqueeze", target, dev, rtol=1e-5, atol=1e-5) +def test_rsqrt(): + expected = get_unary_mod(R.rsqrt) + shape1 = (4, 16) + verify_model_struct("rsqrt_2d", {}, expected(shape1)) 
-@tvm.testing.parametrize_targets -def test_cts_neg_4d(target, dev): - verify_model("neg_4d", target, dev, rtol=1e-5, atol=1e-5) + shape1 = (4, 16, 32, 32) + verify_model_struct("rsqrt_4d", {}, expected(shape1)) -@tvm.testing.parametrize_targets -def test_cts_add_4d(target, dev): - verify_model("add_4d", target, dev, rtol=1e-5, atol=1e-5) +def test_log2(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16), dtype="float32")) -> R.Tensor((4, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.log2, input) + gv: R.Tensor((4, 16), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("log2_2d", {}, expected) + + @I.ir_module + class expected4: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.log2, input) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("log2_4d", {}, expected4) + + +def test_min(): + expected = get_binary_mod(R.minimum) + shape1 = (4, 16) + verify_model_struct("min_2d", {}, expected(shape1)) + + shape1 = (4, 16, 32, 32) + verify_model_struct("min_4d", {}, expected(shape1)) + + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("min_4d_broadcast", {}, expected(shape1, shape2)) + + def method(in1): + return R.minimum(in1, R.const(0.5, "float32")) + + expected = get_unary_mod(method) + shape1 = (4, 16, 32, 32) + verify_model_struct("min_4d_constant", {}, expected(shape1)) + + +def test_max(): + expected = get_binary_mod(R.maximum) + shape1 = (4, 16) + verify_model_struct("max_2d", {}, expected(shape1)) + + shape1 = (4, 16, 32, 32) + verify_model_struct("max_4d", {}, expected(shape1)) + + shape1, shape2 = (4, 16, 32, 32), (1, 16, 1, 1) + verify_model_struct("max_4d_broadcast", {}, expected(shape1, shape2)) + + def method(in1): + return R.maximum(in1, R.const(0.5, "float32")) + + expected = get_unary_mod(method) + shape1 = (4, 16, 32, 32) + verify_model_struct("max_4d_constant", {}, expected(shape1)) + + +def test_clamp(): + # custom module needed + def get_custom(dtype="float32"): + def _appl_shape(shape): + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input1: R.Tensor(shape, dtype=dtype), input2: R.Tensor(shape, dtype=dtype), + input3: R.Tensor(shape, dtype=dtype)) -> R.Tensor(shape, dtype=dtype): + R.func_attr({"num_input": 3}) + with R.dataflow(): + lv: R.Tensor(shape, dtype=dtype) = R.minimum(input1, input3) + gv: R.Tensor(shape, dtype=dtype) = R.maximum(lv, input2) + R.output(gv) + return gv + + return expected + + return _appl_shape + + expected = get_custom() + shape = (4, 16) + verify_model_struct("clamp_2d", {}, expected(shape)) + + shape = (4, 16, 32, 32) + verify_model_struct("clamp_4d", {}, expected(shape)) + + # constant limit + # limits need to be extracted from graph + graph = get_case_graph("clamp_4d_constant") + print(graph.operations[1]) + op = graph.operations[1] + lowlim = op.inputs['a'] + highlim = op.inputs['b'] + shape = (4, 16, 32, 32) + + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input1: R.Tensor(shape, dtype="float32")) -> R.Tensor(shape, dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor(shape, dtype="float32") = R.clip(input1, R.prim_value(T.float32(lowlim)), + R.prim_value(T.float32(highlim))) + R.output(lv) + return lv + + 
verify_model_struct("clamp_4d_constant", {}, expected) + + +def test_conv(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 8, 32, 32), dtype="float32"), filter: R.Tensor((16, 8, 3, 3), dtype="float32"), + bias: R.Tensor((1, 16), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, filter, strides=[1, 1], + padding=[1, 1, 1, 1], dilation=[1, 1], + groups=1, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + lv1: R.Tensor((1, 16, 1, 1), dtype="float32") = R.reshape(bias, R.shape([1, 16, 1, 1])) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.add(lv, lv1) + R.output(gv) + return gv + + binding = { + "filter": np.ones((16, 8, 3, 3), dtype="float32"), + "bias": np.ones((1, 16,), dtype="float32"), + } + verify_model_struct("conv3x3", binding, expected) + + @I.ir_module + class expected_stride: + @R.function + def main(input: R.Tensor((4, 8, 32, 32), dtype="float32"), filter: R.Tensor((16, 8, 3, 3), dtype="float32"), + bias: R.Tensor((1, 16), dtype="float32")) -> R.Tensor((4, 16, 16, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.nn.conv2d(input, filter, strides=[2, 2], + padding=[0, 0, 1, 1], dilation=[1, 1], + groups=1, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + lv1: R.Tensor((1, 16, 1, 1), dtype="float32") = R.reshape(bias, R.shape([1, 16, 1, 1])) + gv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.add(lv, lv1) + R.output(gv) + return gv + + verify_model_struct("conv3x3_stride2x2", binding, expected_stride) + + @I.ir_module + class expected_group: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), filter: R.Tensor((16, 1, 3, 3), dtype="float32"), + bias: R.Tensor((1, 16), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, filter, strides=[1, 1], + padding=[1, 1, 1, 1], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + lv1: R.Tensor((1, 16, 1, 1), dtype="float32") = R.reshape(bias, R.shape([1, 16, 1, 1])) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.add(lv, lv1) + R.output(gv) + return gv + + binding = { + "filter": np.ones((16, 1, 3, 3), dtype="float32"), + "bias": np.ones((1, 16,), dtype="float32"), + } + verify_model_struct("conv3x3_groups0", binding, expected_group) + + +def test_deconv(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), filter: R.Tensor((16, 8, 3, 3), dtype="float32"), + bias: R.Tensor((1, 8), dtype="float32")) -> R.Tensor((4, 8, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 8, 32, 32), dtype="float32") = R.nn.conv2d_transpose(input, filter, strides=[1, 1], + padding=[1, 1, 1, 1], + output_padding=[0, 0], + dilation=[1, 1], groups=1, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + lv1: R.Tensor((1, 8, 1, 1), dtype="float32") = R.reshape(bias, R.shape([1, 8, 1, 1])) + gv: R.Tensor((4, 8, 32, 32), dtype="float32") = R.add(lv, lv1) + R.output(gv) + return gv + + binding = { + "filter": np.ones((16, 8, 3, 3), dtype="float32"), + "bias": np.ones((1, 8,), dtype="float32"), 
+ } + verify_model_struct("deconv3x3", binding, expected) + + @I.ir_module + class expected_stride: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), filter: R.Tensor((16, 8, 3, 3), dtype="float32"), + bias: R.Tensor((1, 8), dtype="float32")) -> R.Tensor((4, 8, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 8, 64, 64), dtype="float32") = R.nn.conv2d_transpose(input, filter, strides=[2, 2], + padding=[0, 0, 1, 1], + output_padding=[0, 0], + dilation=[1, 1], groups=1, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + lv1: R.Tensor((1, 8, 1, 1), dtype="float32") = R.reshape(bias, R.shape([1, 8, 1, 1])) + gv: R.Tensor((4, 8, 64, 64), dtype="float32") = R.add(lv, lv1) + R.output(gv) + return gv + + verify_model_struct("deconv3x3_stride2x2", binding, expected_stride) + + @I.ir_module + class expected_group: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), filter: R.Tensor((16, 1, 3, 3), dtype="float32"), + bias: R.Tensor((1, 16), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d_transpose(input, filter, strides=[1, 1], + padding=[1, 1, 1, 1], + output_padding=[0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + lv1: R.Tensor((1, 16, 1, 1), dtype="float32") = R.reshape(bias, R.shape([1, 16, 1, 1])) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.add(lv, lv1) + R.output(gv) + return gv + + binding = { + "filter": np.ones((16, 1, 3, 3), dtype="float32"), + "bias": np.ones((1, 16,), dtype="float32"), + } + verify_model_struct("deconv3x3_groups0", binding, expected_group) + + +def test_box(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 16, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 3, 3), dtype="float32") = R.ones(R.shape([16, 1, 3, 3]), dtype="float32") + gv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.nn.conv2d(input, lv, strides=[2, 2], + padding=[0, 0, 1, 1], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + verify_model_struct("box3x3", {}, expected) + + @I.ir_module + class expected_stride: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 3, 3), dtype="float32") = R.ones(R.shape([16, 1, 3, 3]), dtype="float32") + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, lv, strides=[1, 1], + padding=[1, 1, 1, 1], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + verify_model_struct("box3x3_stride1x1", {}, expected_stride) + + +def test_debox(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 3, 3), dtype="float32") = R.ones(R.shape([16, 1, 3, 3]), dtype="float32") + gv: R.Tensor((4, 16, 64, 64), dtype="float32") = R.nn.conv2d_transpose(input, lv, strides=[2, 2], + padding=[0, 0, 1, 
1], + output_padding=[0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + verify_model_struct("debox3x3", {}, expected) + + @I.ir_module + class expected_stride: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 3, 3), dtype="float32") = R.ones(R.shape([16, 1, 3, 3]), dtype="float32") + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d_transpose(input, lv, strides=[1, 1], + padding=[1, 1, 1, 1], + output_padding=[0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + verify_model_struct("debox3x3_stride1x1", {}, expected_stride) + + +def test_nearest_downsample(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 16, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 1, 1), dtype="float32") = R.ones(R.shape([16, 1, 1, 1]), dtype="float32") + gv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.nn.conv2d(input, lv, strides=[2, 2], + padding=[0, 0, 0, 0], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + verify_model_struct("nearest_downsample", {}, expected) + + +def test_area_downsample(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 16, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 2, 2), dtype="float32") = R.full(R.shape([16, 1, 2, 2]), R.const(0.25, "float32"), + dtype="float32") + gv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.nn.conv2d(input, lv, strides=[2, 2], + padding=[0, 0, 0, 0], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + verify_model_struct("area_downsample", {}, expected) + + +def test_nearest_upsample(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.image.resize2d, input, [0, 0, 0, 0], [64, 64], method="nearest_neighbor", + rounding_method="round") + gv: R.Tensor((4, 16, 64, 64), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("nearest_upsample", {}, expected) + + +def test_bilinear_upsample(): + @I.ir_module + class expected_s_c: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), + w1: R.Tensor((1, 1, 4, 4), dtype="float32") + ) -> R.Tensor((4, 16, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 4, 4), dtype="float32") = R.tile(w1, + repeats=[16, 1, 1, 1]) + gv: R.Tensor((4, 16, 64, 64), dtype="float32") = R.nn.conv2d_transpose(input, lv, strides=[2, 2], + padding=[1, 1, 1, 1], + output_padding=[0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + binding = { + "w1": np.array([[[[0.0625, 0.1875, 0.1875, 0.0625], + [0.1875, 0.5625, 0.5625, 0.1875], + [0.1875, 0.5625, 0.5625, 0.1875], 
+ [0.0625, 0.1875, 0.1875, 0.0625]]]], dtype="float32") + } + verify_model_struct("bilinear_upsample_symmetric_constant", binding, expected_s_c) + + @I.ir_module + class expected_s_r: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.image.resize2d, input, [0, 0, 0, 0], [64, 64], method="linear", + coordinate_transformation_mode="half_pixel", ) + gv: R.Tensor((4, 16, 64, 64), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("bilinear_upsample_symmetric_replicate", {}, expected_s_r) + + @I.ir_module + class expected_a_c: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.image.resize2d, input, [0, 0, 0, 0], [64, 64], method="linear", + coordinate_transformation_mode="align_corners") + gv: R.Tensor((4, 16, 64, 64), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("bilinear_upsample_aligned_constant", {}, expected_a_c) + + @I.ir_module + class expected_a_r: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.image.resize2d, input, [0, 0, 0, 0], [64, 64], method="linear", + coordinate_transformation_mode="align_corners") + gv: R.Tensor((4, 16, 64, 64), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("bilinear_upsample_aligned_replicate", {}, expected_a_r) + + @I.ir_module + class expected_as_c: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), + w1: R.Tensor((1, 1, 4, 4), dtype="float32")) -> R.Tensor((4, 16, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 3, 3), dtype="float32") = R.tile(w1, + repeats=[16, 1, 1, 1]) + gv: R.Tensor((4, 16, 64, 64), dtype="float32") = R.nn.conv2d_transpose(input, lv, strides=[2, 2], + padding=[1, 1, 0, 0], + output_padding=[0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + binding = { + "w1": np.array([[[[0.25, 0.5, 0.25], + [0.5, 1., 0.5], + [0.25, 0.5, 0.25]]]], dtype="float32") + } + verify_model_struct("bilinear_upsample_asymmetric_constant", binding, expected_as_c) + + # Skip because Replicate - Edge mode is currently not supported in Relax + # verify_model_struct("bilinear_upsample_asymmetric_replicate", {}, None) + + +def test_sum_reduce(): + def method(in1): + return R.sum(in1, axis=[1], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 1, 32, 32) + verify_model_struct("sum_reduce_channel", {}, expected(shape, o_shape)) + + def method(in1): + return R.sum(in1, axis=[2, 3], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 16, 1, 1) + verify_model_struct("sum_reduce_spatial", {}, expected(shape, o_shape)) + + +def test_max_reduce(): + def method(in1): + return R.max(in1, axis=[1], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 1, 32, 32) + verify_model_struct("max_reduce_channel", {}, expected(shape, o_shape)) + + def method(in1): + return R.max(in1, axis=[2, 3], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 
32, 32), (4, 16, 1, 1) + verify_model_struct("max_reduce_spatial", {}, expected(shape, o_shape)) + + +def test_min_reduce(): + def method(in1): + return R.min(in1, axis=[1], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 1, 32, 32) + verify_model_struct("min_reduce_channel", {}, expected(shape, o_shape)) + + def method(in1): + return R.min(in1, axis=[2, 3], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 16, 1, 1) + verify_model_struct("min_reduce_spatial", {}, expected(shape, o_shape)) + + +def test_argmax_reduce(): + @I.ir_module + class expected_ch: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 1, 32, 32), dtype="int64"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.argmax, input, [1], keepdims=True) + gv: R.Tensor((4, 1, 32, 32), dtype="int64") = lv + R.output(gv) + return gv + + verify_model_struct("argmax_reduce_channel", {}, expected_ch) + + @I.ir_module + class expected_sp: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 1, 1), dtype="int64"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.argmax, input, [2, 3], keepdims=True) + gv: R.Tensor((4, 16, 1, 1), dtype="int64") = lv + R.output(gv) + return gv + + verify_model_struct("argmax_reduce_spatial", {}, expected_sp) + + +def test_argmin_reduce(): + @I.ir_module + class expected_ch: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 1, 32, 32), dtype="int64"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.argmin, input, [1], keepdims=True) + gv: R.Tensor((4, 1, 32, 32), dtype="int64") = lv + R.output(gv) + return gv + + verify_model_struct("argmin_reduce_channel", {}, expected_ch) + + @I.ir_module + class expected_sp: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 1, 1), dtype="int64"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.argmin, input, [2, 3], keepdims=True) + gv: R.Tensor((4, 16, 1, 1), dtype="int64") = lv + R.output(gv) + return gv + + verify_model_struct("argmin_reduce_spatial", {}, expected_sp) + + +def test_all_reduce(): + @I.ir_module + class expected_ch: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="bool")) -> R.Tensor((4, 1, 32, 32), dtype="bool"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.all, input, [1], True) + gv: R.Tensor((4, 1, 32, 32), dtype="bool") = lv + R.output(gv) + return gv + + verify_model_struct("all_reduce_channel", {}, expected_ch) + + @I.ir_module + class expected_sp: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="bool")) -> R.Tensor((4, 16, 1, 1), dtype="bool"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.all, input, [2, 3], True) + gv: R.Tensor((4, 16, 1, 1), dtype="bool") = lv + R.output(gv) + return gv + + verify_model_struct("all_reduce_spatial", {}, expected_sp) + + +def test_any_reduce(): + @I.ir_module + class expected_ch: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="bool")) -> R.Tensor((4, 1, 32, 32), dtype="bool"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.any, input, [1], True) + gv: R.Tensor((4, 1, 32, 32), dtype="bool") = lv + R.output(gv) + return gv + + verify_model_struct("any_reduce_channel", {}, expected_ch) + + @I.ir_module + class 
expected_sp: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="bool")) -> R.Tensor((4, 16, 1, 1), dtype="bool"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.any, input, [2, 3], True) + gv: R.Tensor((4, 16, 1, 1), dtype="bool") = lv + R.output(gv) + return gv + + verify_model_struct("any_reduce_spatial", {}, expected_sp) + + +def test_mean_reduce(): + def method(in1): + return R.mean(in1, axis=[2, 3], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 16, 1, 1) + verify_model_struct("mean_reduce_spatial", {}, expected(shape, o_shape)) + + def method(in1): + return R.mean(in1, axis=[1], keepdims=True) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 1, 32, 32) + verify_model_struct("mean_reduce_channel", {}, expected(shape, o_shape)) + + +def test_reshape(): + def met(in1): + return R.reshape(in1, R.shape(list(o_shape))) + + shape = (2, 3, 3, 3, 2) + o_shape = (2, 3, 9, 2) + expected = get_unary_mod(met) + verify_model_struct("reshape_partial", {}, expected(shape, o_shape)) + + shape = (4, 16, 1, 1) + o_shape = (4, 16) + verify_model_struct("reshape_squeeze", {}, expected(shape, o_shape)) + + shape = (4, 16, 32, 32) + o_shape = (4, 16384) + verify_model_struct("reshape_flatten", {}, expected(shape, o_shape)) + + +def test_squeeze(): + def method(in1): + return R.squeeze(in1, axis=[2, 3]) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 1, 1), (4, 16) + verify_model_struct("squeeze_spatial", {}, expected(shape, o_shape)) + + +def test_unsqueeze(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16), dtype="float32")) -> R.Tensor((4, 16, 1, 1), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 1), dtype="float32") = R.expand_dims(input, axis=[2]) + gv: R.Tensor((4, 16, 1, 1), dtype="float32") = R.expand_dims(lv, axis=[3]) + R.output(gv) + return gv + + verify_model_struct("unsqueeze", {}, expected) + + +def test_transpose(): + def method(in1): + return R.permute_dims(in1, axes=[0, 3, 1, 2]) + + expected = get_unary_mod(method) + shape, o_shape = (4, 32, 32, 16), (4, 16, 32, 32) + verify_model_struct("transpose_nhwc_to_nchw", {}, expected(shape, o_shape)) + + def method(in1): + return R.permute_dims(in1, axes=[0, 2, 3, 1]) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 32, 32, 16) + verify_model_struct("transpose_nchw_to_nhwc", {}, expected(shape, o_shape)) + + +def test_split(): + @tvm.script.ir.ir_module + class expected_ch: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tuple( + R.Tensor((4, 8, 32, 32), dtype="float32"), R.Tensor((4, 8, 32, 32), dtype="float32")): + R.func_attr({"num_input": 1}) + with (R.dataflow()): + lv: R.Tuple(R.Tensor((4, 8, 32, 32), dtype="float32"), + R.Tensor((4, 8, 32, 32), dtype="float32")) = \ + R.split(input, indices_or_sections=[8], axis=1) + lv1: R.Tensor((4, 8, 32, 32), dtype="float32") = lv[0] + lv2: R.Tensor((4, 8, 32, 32), dtype="float32") = lv[1] + gv: R.Tuple(R.Tensor((4, 8, 32, 32), dtype="float32"), + R.Tensor((4, 8, 32, 32), dtype="float32")) = lv1, lv2 + R.output(gv) + return gv + + verify_model_struct("split_channel", {}, expected_ch) + + @tvm.script.ir.ir_module + class expected_ub: + @R.function + def main(input: R.Tensor((4, 32, 3), dtype="float32")) \ + -> R.Tuple( + R.Tensor((4, 12, 3), dtype="float32"), + R.Tensor((4, 4, 3), dtype="float32"), + R.Tensor((4, 16, 3), 
dtype="float32") + ): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tuple(R.Tensor((4, 12, 3), dtype="float32"), + R.Tensor((4, 4, 3), dtype="float32"), + R.Tensor((4, 16, 3), dtype="float32") + ) = R.split(input, indices_or_sections=[12, 16], axis=1) + lv1: R.Tensor((4, 12, 3), dtype="float32") = lv[0] + lv2: R.Tensor((4, 4, 3), dtype="float32") = lv[1] + lv3: R.Tensor((4, 16, 3), dtype="float32") = lv[2] + gv: R.Tuple(R.Tensor((4, 12, 3), dtype="float32"), + R.Tensor((4, 4, 3), dtype="float32"), + R.Tensor((4, 16, 3), dtype="float32")) = lv1, lv2, lv3 + R.output(gv) + return gv + + verify_model_struct("split_unbalanced", {}, expected_ub) + + +def test_concat(): + def method(in1, in2): + return R.concat((in1, in2), axis=1) + + expected = get_binary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 32, 32, 32) + verify_model_struct("concat_channel", {}, expected(shape, osh=o_shape)) + + +def test_stack(): + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input1: R.Tensor((4, 16, 32, 32), dtype="float32"), + input2: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 2, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 2}) + with R.dataflow(): + lv: R.Tensor((4, 1, 16, 32, 32), dtype="float32") = R.expand_dims(input1, axis=[1]) + lv1: R.Tensor((4, 1, 16, 32, 32), dtype="float32") = R.expand_dims(input2, axis=[1]) + gv: R.Tensor((4, 2, 16, 32, 32), dtype="float32") = R.concat((lv, lv1), axis=1) + R.output(gv) + return gv + + verify_model_struct("stack", {}, expected) + + +def test_unstack(): + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 3, 16), dtype="float32")) -> \ + R.Tuple(R.Tensor((4, 16), dtype="float32"), + R.Tensor((4, 16), dtype="float32"), + R.Tensor((4, 16), dtype="float32")): + R.func_attr({"num_input": 1}) + with (R.dataflow()): + lv: R.Tuple(R.Tensor((4, 1, 16), dtype="float32"), + R.Tensor((4, 1, 16), dtype="float32"), + R.Tensor((4, 1, 16), dtype="float32")) = \ + R.split(input, indices_or_sections=[1, 2], axis=1) + lv1: R.Tensor((4, 1, 16), dtype="float32") = lv[0] + lv2: R.Tensor((4, 16), dtype="float32") = R.squeeze(lv1, axis=[1]) + lv3: R.Tensor((4, 1, 16), dtype="float32") = lv[1] + lv4: R.Tensor((4, 16), dtype="float32") = R.squeeze(lv3, axis=[1]) + lv5: R.Tensor((4, 1, 16), dtype="float32") = lv[2] + lv6: R.Tensor((4, 16), dtype="float32") = R.squeeze(lv5, axis=[1]) + gv: R.Tuple(R.Tensor((4, 16), dtype="float32"), R.Tensor((4, 16), dtype="float32"), + R.Tensor((4, 16), dtype="float32")) = lv2, lv4, lv6 + R.output(gv) + return gv + + verify_model_struct("unstack", {}, expected) + + +def test_slice(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> \ + R.Tensor((4, 16, 30, 28), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + gv: R.Tensor((4, 16, 30, 28), dtype="float32") = \ + R.strided_slice(input, + (R.prim_value(2), R.prim_value(3)), + (R.prim_value(1), R.prim_value(2)), + (R.prim_value(-1), R.prim_value(-2)), + (R.prim_value(1), R.prim_value(1)), assume_inbound=False) + R.output(gv) + return gv + + verify_model_struct("slice", {}, expected) + + @I.ir_module + class expected_stride: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> \ + R.Tensor((4, 4, 12, 29), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + gv: R.Tensor((4, 4, 12, 29), dtype="float32") = \ + R.strided_slice(input, (R.prim_value(1), R.prim_value(2), 
R.prim_value(3)), + (R.prim_value(5), R.prim_value(16), R.prim_value(2)), + (R.prim_value(1), R.prim_value(4), R.prim_value(-1)), + (R.prim_value(-1), R.prim_value(-1), R.prim_value(1)), + assume_inbound=False) + R.output(gv) + return gv + + verify_model_struct("slice_strides", {}, expected_stride) + + +def test_pad(): + @I.ir_module + class expected01: + @R.function + def main(input: R.Tensor((1, 16, 32, 32), dtype="float32")) -> R.Tensor((1, 16, 33, 33), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.nn.mirror_pad, input, [0,0,0,0], [0,0,1,1], "REFLECT") + gv: R.Tensor((1, 16, 33, 33), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("pad_0-1_reflect", {}, expected01) + + @I.ir_module + class expected10: + @R.function + def main(input: R.Tensor((1, 16, 32, 32), dtype="float32")) -> R.Tensor((1, 16, 33, 33), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.nn.mirror_pad, input, [0,0,1,1], [0,0,0,0], "REFLECT") + gv: R.Tensor((1, 16, 33, 33), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("pad_1-0_reflect", {}, expected10) + + @I.ir_module + class expected11: + @R.function + def main(input: R.Tensor((1, 16, 32, 32), dtype="float32")) -> R.Tensor((1, 16, 34, 34), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.nn.mirror_pad, input, [0,0,1,1], [0,0,1,1], "REFLECT") + gv: R.Tensor((1, 16, 34, 34), dtype="float32") = lv + R.output(gv) + return gv + + verify_model_struct("pad_1-1_reflect", {}, expected11) + + def method_wr(pw): + def method(in1): + return R.nn.pad(in1, pad_value=R.const(0, "float32"), pad_width=pw, pad_mode="constant") + + return method + + expected = get_unary_mod(method_wr([0, 0, 0, 0, 0, 1, 0, 1])) + shape, o_shape = (1, 16, 32, 32), (1, 16, 33, 33) + verify_model_struct("pad_0-1_constant", {}, expected(shape, o_shape)) + + expected = get_unary_mod(method_wr([0, 0, 0, 0, 1, 0, 1, 0])) + verify_model_struct("pad_1-0_constant", {}, expected(shape, o_shape)) + + expected = get_unary_mod(method_wr([0, 0, 0, 0, 1, 1, 1, 1])) + o_shape = (1, 16, 34, 34) + verify_model_struct("pad_1-1_constant", {}, expected(shape, o_shape)) + + # Replicate - Edge mode is currently not supported in TVM relax + # verify_model_struct("pad_0-1_replicate", {}, None) + # verify_model_struct("pad_1-0_replicate", {}, None) + # verify_model_struct("pad_1-1_replicate", {}, None) + + +def test_tile(): + def method(in1): + return R.tile(in1, repeats=[1, 1, 3, 3, ]) + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 16, 96, 96) + verify_model_struct("tile_spatial", {}, expected(shape, o_shape)) + + def method(in1): + return R.tile(in1, repeats=[1, 16]) + + expected = get_unary_mod(method) + shape, o_shape = (16, 1), (16, 16) + verify_model_struct("tile_channel", {}, expected(shape, o_shape)) + + def method(in1): + return R.tile(in1, repeats=[16, 1]) + + expected = get_unary_mod(method) + shape, o_shape = (1, 16), (16, 16) + verify_model_struct("tile_batch", {}, expected(shape, o_shape)) + + +def test_matmul(): + def method(in1, in2): + return R.matmul(in1, in2, out_dtype="void") + + shape1, shape2 = (4, 16), (16, 4) + expected = get_binary_mod(method) + verify_model_struct("matmul_2d", {}, expected(shape1, shape2, (4, 4))) + + shape1 = (4, 16, 32, 32) + verify_model_struct("matmul_4d", {}, expected(shape1)) + + def get_custom_mod(dtype="float32"): + def _appl_shape(sh1, sh2, osh, axes): + global shape1, 
shape2, o_shape + shape1, shape2, o_shape = sh1, sh2, osh + sh1_t = tuple([shape1[i] for i in axes]) + + @tvm.script.ir.ir_module + class expected: + @R.function + def main( + lhs: R.Tensor(shape1, dtype), + rhs: R.Tensor(shape2, dtype) + ) -> R.Tensor(o_shape, "float32"): + R.func_attr({"num_input": 2}) + with R.dataflow(): + lv: R.Tensor(sh1_t, dtype=dtype) = R.permute_dims(lhs, axes=axes) + gv: R.Tensor(o_shape, dtype="float32") = R.matmul(lv, rhs, out_dtype="void") + R.output(gv) + return gv + + return expected + + return _appl_shape + + shape1 = (4, 16) + expected = get_custom_mod() + verify_model_struct("matmul_2d_transpose", {}, expected(shape1, shape1, (16, 16), [1, 0])) + + shape1 = (4, 16, 32, 32) + verify_model_struct("matmul_4d_transpose", {}, expected(shape1, shape1, shape1, [0, 1, 3, 2])) + + +def test_sigmoid(): + expected = get_unary_mod(R.sigmoid) + shape = (4, 16) + verify_model_struct("sigmoid_2d_standalone", {}, expected(shape)) + + shape = (4, 16, 32, 32) + verify_model_struct("sigmoid_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.sigmoid(lv) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("sigmoid", binding, expected) + + +def test_relu(): + expected = get_unary_mod(R.nn.relu) + shape = (4, 16) + verify_model_struct("relu_2d_standalone", {}, expected(shape)) + + shape = (4, 16, 32, 32) + verify_model_struct("relu_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.relu(lv) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("relu", binding, expected) + + +def test_prelu(): + def get_custom_mod(dtype="float32"): + def _appl_shape(sh1, sh2, osh, axes): + global shape1, shape2, o_shape + shape1, shape2, o_shape = sh1, sh2, osh + expanded = [1] * len(sh1) + expanded[1] = sh2[0] + + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input1: R.Tensor(shape1, dtype="float32"), + input2: R.Tensor(shape2, dtype="float32")) -> R.Tensor(o_shape, dtype="float32"): + R.func_attr({"num_input": 2}) + with R.dataflow(): + lv: R.Tensor(shape1, dtype="bool") = R.less(input1, R.const(0, "float32")) + lv1: R.Tensor(expanded, dtype="float32") = R.expand_dims(input2, axis=axes) + lv2: R.Tensor(o_shape, dtype="float32") = R.multiply(lv1, input1) + gv: R.Tensor(o_shape, dtype="float32") = R.where(lv, lv2, input1) + R.output(gv) + return gv + + return expected + + return _appl_shape + + expected = get_custom_mod() + 
shape1, shape2 = (16, 16), (16,) + verify_model_struct("prelu_2d_standalone", {}, expected(shape1, shape2, shape1, [0])) + + shape1, shape2 = (16, 16, 32, 32), (16,) + verify_model_struct("prelu_4d_standalone", {}, expected(shape1, shape2, shape1, [0, 2, 3])) + + @I.ir_module + class expected: + @R.function + def main(input1: R.Tensor((16, 16, 32, 32), dtype="float32"), + input2: R.Tensor((16,), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((16, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 2}) + with R.dataflow(): + lv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.conv2d(input1, + w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + lv1: R.Tensor((16, 16, 32, 32), dtype="bool") = R.less(lv, R.const(0, "float32")) + lv2: R.Tensor((1, 16, 1, 1), dtype="float32") = R.expand_dims(input2, axis=[0, 2, 3]) + lv3: R.Tensor((16, 16, 32, 32), dtype="float32") = R.multiply(lv2, lv) + gv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.where(lv1, lv3, lv) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("prelu", binding, expected) + + +def test_leaky_relu(): + alpha = 0.5 + + def method(in1): + return R.nn.leakyrelu(in1, alpha) + + expected = get_unary_mod(method) + shape = (16, 16) + + verify_model_struct("leaky_relu_2d_standalone", {}, expected(shape)) + + shape = (16, 16, 32, 32) + verify_model_struct("leaky_relu_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((16, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((16, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + gv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.leakyrelu(lv, alpha=0.5) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("leaky_relu", binding, expected) + + +def test_elu(): + def get_custom_mod(): + def _appl_shape(shape): + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input: R.Tensor(shape, dtype="float32")) -> R.Tensor(shape, dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor(shape, dtype="float32") = R.exp(input) + lv1: R.Tensor(shape, dtype="bool") = R.less(input, R.const(0, "float32")) + lv2: R.Tensor(shape, dtype="float32") = R.subtract(lv, R.const(1, "float32")) + lv3: R.Tensor(shape, dtype="float32") = R.multiply(R.const(1, "float32"), lv2) + gv: R.Tensor(shape, dtype="float32") = R.where(lv1, lv3, input) + R.output(gv) + return gv + + return expected + + return _appl_shape + + expected = get_custom_mod() + shape = (16, 16) + + verify_model_struct("elu_2d_standalone", {}, expected(shape)) + + shape = (16, 16, 32, 32) + verify_model_struct("elu_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((16, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((16, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + 
strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + lv1: R.Tensor((16, 16, 32, 32), dtype="float32") = R.exp(lv) + lv2: R.Tensor((16, 16, 32, 32), dtype="bool") = R.less(lv, R.const(0, "float32")) + lv3: R.Tensor((16, 16, 32, 32), dtype="float32") = R.subtract(lv1, R.const(1, "float32")) + lv4: R.Tensor((16, 16, 32, 32), dtype="float32") = R.multiply(R.const(1, "float32"), lv3) + gv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.where(lv2, lv4, lv) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("elu", binding, expected) + + +def test_selu(): + def get_custom_mod(): + def _appl_shape(shape): + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input: R.Tensor(shape, dtype="float32")) -> R.Tensor(shape, dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor(shape, dtype="float32") = R.exp(input) + lv1: R.Tensor(shape, dtype="bool") = R.less(input, R.const(0, "float32")) + lv2: R.Tensor(shape, dtype="float32") = R.subtract(lv, R.const(1, "float32")) + lv3: R.Tensor(shape, dtype="float32") = R.multiply(R.const(1.6732631921768188, "float32"), + lv2) + lv4: R.Tensor(shape, dtype="float32") = R.where(lv1, lv3, input) + gv: R.Tensor(shape, dtype="float32") = R.multiply(R.const(1.0507010221481323, "float32"), + lv4) + R.output(gv) + return gv + + return expected + + return _appl_shape + + expected = get_custom_mod() + shape = (16, 16) + verify_model_struct("selu_2d_standalone", {}, expected(shape)) + + shape = (16, 16, 32, 32) + verify_model_struct("selu_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((16, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((16, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + lv1: R.Tensor((16, 16, 32, 32), dtype="float32") = R.exp(lv) + lv2: R.Tensor((16, 16, 32, 32), dtype="bool") = R.less(lv, R.const(0, "float32")) + lv3: R.Tensor((16, 16, 32, 32), dtype="float32") = R.subtract(lv1, R.const(1, "float32")) + lv4: R.Tensor((16, 16, 32, 32), dtype="float32") = R.multiply(R.const(1.6732631921768188, "float32"), + lv3) + lv5: R.Tensor((16, 16, 32, 32), dtype="float32") = R.where(lv2, lv4, lv) + gv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.multiply(R.const(1.0507010221481323, "float32"), + lv5) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("selu", binding, expected) + + +def test_gelu(): + expected = get_unary_mod(R.nn.gelu) + shape = (16, 16) + verify_model_struct("gelu_2d_standalone", {}, expected(shape)) + + shape = (16, 16, 32, 32) + verify_model_struct("gelu_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((16, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((16, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", 
kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + gv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.gelu(lv) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("gelu", binding, expected) + + +def test_silu(): + def get_custom_mod(): + def _appl_shape(shape): + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input: R.Tensor(shape, dtype="float32")) -> R.Tensor(shape, dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor(shape, dtype="float32") = R.sigmoid(input) + gv: R.Tensor(shape, dtype="float32") = R.multiply(input, lv) + R.output(gv) + return gv + + return expected + + return _appl_shape + + expected = get_custom_mod() + shape = (16, 16) + + verify_model_struct("silu_2d_standalone", {}, expected(shape)) + + shape = (16, 16, 32, 32) + verify_model_struct("silu_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((16, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((16, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + lv1: R.Tensor((16, 16, 32, 32), dtype="float32") = R.sigmoid(lv) + gv: R.Tensor((16, 16, 32, 32), dtype="float32") = R.multiply(lv, lv1) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("silu", binding, expected) + + +def test_softmax(): + def method(in1): + return R.nn.softmax(in1, axis=1) + + expected = get_unary_mod(method) + shape = (4, 16) + verify_model_struct("softmax_2d_standalone", {}, expected(shape)) + + shape = (4, 16, 32, 32) + verify_model_struct("softmax_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.softmax(lv, axis=1) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("softmax", binding, expected) + + +def test_softplus(): + def get_custom_mod(): + def _appl_shape(shape): + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input: R.Tensor(shape, dtype="float32")) -> R.Tensor(shape, dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor(shape, dtype="float32") = R.exp(input) + lv1: R.Tensor(shape, dtype="float32") = R.add(lv, R.const(1, "float32")) + gv: R.Tensor(shape, dtype="float32") = R.log(lv1) + R.output(gv) + return gv + + return expected + + return _appl_shape + + expected = get_custom_mod() + shape = (4, 16) + + verify_model_struct("softplus_2d_standalone", {}, expected(shape)) + + shape = (4, 16, 32, 32) + verify_model_struct("softplus_4d_standalone", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), 
dtype="float32"), + w1: R.Tensor((16, 1, 1, 1), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, w1, + strides=[1, 1], padding=[0, 0, 0, 0], + dilation=[1, 1], groups=16, + data_layout="NCHW", kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + lv1: R.Tensor((4, 16, 32, 32), dtype="float32") = R.exp(lv) + lv2: R.Tensor((4, 16, 32, 32), dtype="float32") = R.add(lv1, R.const(1, "float32")) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.log(lv2) + R.output(gv) + return gv + + binding = {"w1": np.ones([16, 1, 1, 1], dtype="float32")} + verify_model_struct("softplus", binding, expected) + + +def test_linear(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16), dtype="float32"), weights: R.Tensor((32, 16), dtype="float32"), + bias: R.Tensor((1, 32), dtype="float32")) -> R.Tensor((4, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 32), dtype="float32") = R.permute_dims(weights, axes=[1, 0]) + lv1: R.Tensor((4, 32), dtype="float32") = R.matmul(input, lv, out_dtype="void") + lv2: R.Tensor((1, 32), dtype="float32") = R.reshape(bias, R.shape([1, 32])) + gv: R.Tensor((4, 32), dtype="float32") = R.add(lv1, lv2) + R.output(gv) + return gv + + binding = { + "weights": np.ones([32, 16], dtype="float32"), + "bias": np.ones([1, 32], dtype="float32") + } + verify_model_struct("linear", binding, expected) + + @I.ir_module + class expected_nb: + @R.function + def main(input: R.Tensor((4, 16), dtype="float32"), weights: R.Tensor((32, 16), dtype="float32")) -> R.Tensor( + (4, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 32), dtype="float32") = R.permute_dims(weights, axes=[1, 0]) + gv: R.Tensor((4, 32), dtype="float32") = R.matmul(input, lv, out_dtype="void") + R.output(gv) + return gv + + binding = {"weights": np.ones([32, 16], dtype="float32")} + verify_model_struct("linear_nobias", binding, expected_nb) + + +def test_separable_conv(): + @tvm.script.ir.ir_module + class expected_wb: + @R.function + def main(input1: R.Tensor((4, 8, 32, 32), dtype="float32"), + plane_filter: R.Tensor((8, 1, 3, 3), dtype="float32"), + point_filter: R.Tensor((16, 8, 1, 1), dtype="float32"), + bias: R.Tensor((1, 16,), dtype="float32"), + ) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 8, 32, 32), dtype="float32") = R.nn.conv2d(input1, + plane_filter, + strides=[1, 1], + padding=[1, 1, 1, 1], + dilation=[1, 1], groups=8, + data_layout="NCHW", + kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + lv1: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(lv, + point_filter, + strides=[1, 1], + padding=[0, 0, 0, 0], + dilation=[1, 1], groups=1, + data_layout="NCHW", + kernel_layout="OIHW", + out_layout="NCHW", + out_dtype="void") + lv2: R.Tensor((1, 16, 1, 1), dtype="float32") = R.reshape(bias, + R.shape([1, 16, 1, 1])) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.add(lv1, lv2) + R.output(gv) + return gv + + binding = { + "plane_filter": np.ones((8, 1, 3, 3), dtype="float32"), + "point_filter": np.ones((16, 8, 1, 1), dtype="float32"), + "bias": np.ones((1, 16,), dtype="float32"), + } + verify_model_struct("separable_conv3x3", binding, expected_wb) + + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input1: R.Tensor((4, 8, 32, 32), 
dtype="float32"), + plane_filter: R.Tensor((8, 1, 3, 3), dtype="float32"), + point_filter: R.Tensor((16, 8, 1, 1), dtype="float32"), + ) -> R.Tensor((4, 16, 16, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 8, 16, 16), dtype="float32") = R.nn.conv2d(input1, + plane_filter, + strides=[2, 2], + padding=[0, 0, 1, 1], + dilation=[1, 1], + groups=8, + data_layout="NCHW", + kernel_layout="OIHW", + out_layout="NCHW", out_dtype="void") + gv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.nn.conv2d(lv, + point_filter, + strides=[1, 1], + padding=[0, 0, 0, 0], + dilation=[1, 1], + groups=1, + data_layout="NCHW", + kernel_layout="OIHW", + out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + binding = { + "plane_filter": np.ones((8, 1, 3, 3), dtype="float32"), + "point_filter": np.ones((16, 8, 1, 1), dtype="float32"), + } + verify_model_struct("separable_conv3x3_with_attrs", binding, expected) + + +def test_separable_deconv(): + @tvm.script.ir.ir_module + class expected_wb: + @R.function + def main(input1: R.Tensor((4, 16, 32, 32), dtype="float32"), + plane_filter: R.Tensor((8, 1, 3, 3), dtype="float32"), + point_filter: R.Tensor((16, 8, 1, 1), dtype="float32"), + bias: R.Tensor((1, 8,), dtype="float32"), + ) -> R.Tensor((4, 8, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 8, 32, 32), dtype="float32") = R.nn.conv2d_transpose(input1, point_filter, + strides=[1, 1], + padding=[0, 0, 0, 0], + output_padding=[0, 0], + dilation=[1, 1], groups=1, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + lv1: R.Tensor((4, 8, 32, 32), dtype="float32") = R.nn.conv2d_transpose(lv, plane_filter, + strides=[1, 1], + padding=[1, 1, 1, 1], + output_padding=[0, 0], + dilation=[1, 1], + groups=8, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + lv2: R.Tensor((1, 16, 1, 1), dtype="float32") = R.reshape(bias, + R.shape([1, 8, 1, 1])) + gv: R.Tensor((4, 8, 32, 32), dtype="float32") = R.add(lv1, lv2) + R.output(gv) + return gv + + binding = { + "plane_filter": np.ones((8, 1, 3, 3), dtype="float32"), + "point_filter": np.ones((16, 8, 1, 1), dtype="float32"), + "bias": np.ones((1, 8,), dtype="float32"), + } + verify_model_struct("separable_deconv3x3", binding, expected_wb) + + @tvm.script.ir.ir_module + class expected: + @R.function + def main(input1: R.Tensor((4, 16, 32, 32), dtype="float32"), + plane_filter: R.Tensor((8, 1, 3, 3), dtype="float32"), + point_filter: R.Tensor((16, 8, 1, 1), dtype="float32"), + ) -> R.Tensor((4, 8, 64, 64), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 8, 32, 32), dtype="float32") = R.nn.conv2d_transpose(input1, + point_filter, + strides=[1, 1], + padding=[0, 0, 0, 0], + output_padding=[0, 0], + dilation=[1, 1], groups=1, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + gv: R.Tensor((4, 8, 64, 64), dtype="float32") = R.nn.conv2d_transpose(lv, + plane_filter, + strides=[2, 2], + padding=[0, 0, 1, 1], + output_padding=[0, 0], + dilation=[1, 1], groups=8, + data_layout="NCHW", + kernel_layout="IOHW", + out_layout="NCHW", + out_dtype="void") + R.output(gv) + return gv + + binding = { + "plane_filter": np.ones((8, 1, 3, 3), dtype="float32"), + "point_filter": np.ones((16, 8, 1, 1), dtype="float32"), + } + verify_model_struct("separable_deconv3x3_with_attrs", binding, expected) + + +def test_max_pool(): + def method(in1): + 
return R.nn.max_pool2d(in1, + pool_size=[3, 3], + strides=[2, 2], + dilation=[1, 1], + padding=[0, 0, 1, 1], + ceil_mode=False, + count_include_pad=False, + layout="NCHW", + out_layout="NCHW") + + expected = get_unary_mod(method) + shape, o_shape = (4, 16, 32, 32), (4, 16, 16, 16) + + verify_model_struct("max_pool3x3", {}, expected(shape, o_shape)) + + def method(in1): + return R.nn.max_pool2d(in1, + pool_size=[3, 3], + strides=[1, 1], + dilation=[1, 1], + padding=[1, 1, 1, 1], + ceil_mode=False, + count_include_pad=False, layout="NCHW", out_layout="NCHW") + + expected = get_unary_mod(method) + shape = (4, 16, 32, 32) + verify_model_struct("max_pool3x3_stride1x1", {}, expected(shape)) + + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 16, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 33, 33), dtype="float32") = R.nn.pad(input, pad_value=R.const(0, "float32"), + pad_width=[0, 0, 0, 0, 0, 1, 0, 1], + pad_mode="constant") + gv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.nn.max_pool2d(lv, pool_size=[3, 3], strides=[2, 2], + dilation=[1, 1], padding=[0, 0, 0, 0], + ceil_mode=False, + count_include_pad=False, layout="NCHW", + out_layout="NCHW") + R.output(gv) + return gv + + verify_model_struct("max_pool3x3_constant-border", {}, expected) + + +def test_avg_pool(): + shape, o_shape = (4, 16, 32, 32), (4, 16, 16, 16) + + def method(in1): + return R.nn.avg_pool2d(in1, + pool_size=[3, 3], + strides=[2, 2], + dilation=[1, 1], + padding=[0, 0, 1, 1], + ceil_mode=False, + count_include_pad=True, + layout="NCHW", + out_layout="NCHW") + + expected = get_unary_mod(method) + verify_model_struct("avg_pool3x3", {}, expected(shape, o_shape)) + + def method(in1): + return R.nn.avg_pool2d(in1, + pool_size=[3, 3], + strides=[1, 1], + dilation=[1, 1], + padding=[1, 1, 1, 1], + ceil_mode=False, + count_include_pad=True, + layout="NCHW", + out_layout="NCHW") + + expected = get_unary_mod(method) + verify_model_struct("avg_pool3x3_stride1x1", {}, expected(shape)) + + def method(in1): + return R.nn.avg_pool2d(in1, + pool_size=[3, 3], + strides=[2, 2], + dilation=[1, 1], + padding=[0, 0, 1, 1], + ceil_mode=False, + count_include_pad=False, + layout="NCHW", + out_layout="NCHW") + + expected = get_unary_mod(method) + verify_model_struct("avg_pool3x3_ignore-border", {}, expected(shape, o_shape)) + + +def test_rms_pool(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 16, 16), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.power(input, R.const(2, "float32")) + lv1: R.Tensor((4, 16, 16, 16), dtype="float32") = R.nn.avg_pool2d(lv, pool_size=[3, 3], strides=[2, 2], + dilation=[1, 1], padding=[0, 0, 1, 1], + ceil_mode=False, + count_include_pad=True, layout="NCHW", + out_layout="NCHW") + gv: R.Tensor((4, 16, 16, 16), dtype="float32") = R.sqrt(lv1) + R.output(gv) + return gv + + verify_model_struct("rms_pool3x3", {}, expected) + + +def test_local_response_normalization(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv = R.emit_te(topi.nn.lrn, input, 5, 1, 1e-5, 0.75, 1.0) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = lv + R.output(gv) + return gv + + 
verify_model_struct("local_response_normalization", {}, expected) + + +def test_local_mean_normalization(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 3, 3), dtype="float32") = R.full(R.shape([16, 1, 3, 3]), + R.const(0.1111111119389534, "float32"), + dtype="float32") + lv1: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, lv, strides=[1, 1], + padding=[1, 1, 1, 1], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.subtract(input, lv1) + R.output(gv) + return gv + + verify_model_struct("local_mean_normalization", {}, expected) + + +def test_local_variance_normalization(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.power(input, R.const(2, "float32")) + lv1: R.Tensor((16, 1, 3, 3), dtype="float32") = R.full(R.shape([16, 1, 3, 3]), + R.const(0.1111111119389534, "float32"), + dtype="float32") + lv2: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(lv, lv1, strides=[1, 1], + padding=[1, 1, 1, 1], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + lv3: R.Tensor((4, 16, 32, 32), dtype="float32") = R.sqrt(lv2) + lv4: R.Tensor((4, 16, 32, 32), dtype="float32") = R.add(lv3, R.const(1, "float32")) + lv5: R.Tensor((4, 16, 32, 32), dtype="float32") = R.maximum(lv4, + R.const(9.9999997473787516e-06, "float32")) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.divide(input, lv5) + R.output(gv) + return gv + + verify_model_struct("local_variance_normalization", {}, expected) + + +def test_local_contrast_normalization(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16, 1, 3, 3), dtype="float32") = R.full(R.shape([16, 1, 3, 3]), + R.const(0.1111111119389534, "float32"), + dtype="float32") + lv1: R.Tensor((4, 16, 32, 32), dtype="float32") = R.nn.conv2d(input, lv, strides=[1, 1], + padding=[1, 1, 1, 1], dilation=[1, 1], + groups=16, data_layout="NCHW", + kernel_layout="OIHW", out_layout="NCHW", + out_dtype="void") + lv2: R.Tensor((4, 16, 32, 32), dtype="float32") = R.subtract(input, lv1) + lv3: R.Tensor((4, 16, 32, 32), dtype="float32") = R.power(lv2, R.const(2, "float32")) + lv4: R.Tensor((16, 4, 32, 32), dtype="float32") = R.permute_dims(lv3, axes=[1, 0, 2, 3]) + lv5: R.Tensor((16, 4, 32, 32), dtype="float32") = R.nn.avg_pool2d(lv4, pool_size=[3, 3], strides=[1, 1], + dilation=[1, 1], padding=[1, 1, 1, 1], + ceil_mode=False, + count_include_pad=True, layout="NCHW", + out_layout="NCHW") + lv6: R.Tensor((4, 16, 32, 32), dtype="float32") = R.permute_dims(lv5, axes=[1, 0, 2, 3]) + lv7: R.Tensor((4, 16, 32, 32), dtype="float32") = R.sqrt(lv6) + lv8: R.Tensor((4, 16, 32, 32), dtype="float32") = R.add(lv7, R.const(1, "float32")) + lv9: R.Tensor((4, 16, 32, 32), dtype="float32") = R.maximum(lv8, + R.const(9.9999997473787516e-06, "float32")) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.divide(lv2, lv9) + R.output(gv) + return gv 
+ + verify_model_struct("local_contrast_normalization", {}, expected) + + +def test_l1_normalization(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.abs(input) + lv1: R.Tensor((4, 1, 32, 32), dtype="float32") = R.sum(lv, axis=[1], keepdims=True) + lv2: R.Tensor((4, 1, 32, 32), dtype="float32") = R.add(lv1, R.const(1, "float32")) + lv3: R.Tensor((4, 1, 32, 32), dtype="float32") = R.maximum(lv2, + R.const(9.9999997473787516e-06, "float32")) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.divide(input, lv3) + R.output(gv) + return gv + + verify_model_struct("l1_normalization", {}, expected) + + +def test_l2_normalization(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.power(input, R.const(2, "float32")) + lv1: R.Tensor((4, 1, 32, 32), dtype="float32") = R.sum(lv, axis=[1], keepdims=True) + lv2: R.Tensor((4, 1, 32, 32), dtype="float32") = R.sqrt(lv1) + lv3: R.Tensor((4, 1, 32, 32), dtype="float32") = R.add(lv2, R.const(0, "float32")) + lv4: R.Tensor((4, 1, 32, 32), dtype="float32") = R.maximum(lv3, + R.const(0.0010000000474974513, "float32")) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = R.divide(input, lv4) + R.output(gv) + return gv + + verify_model_struct("l2_normalization", {}, expected) + + +def test_batch_norm(): + @I.ir_module + class expected: + @R.function + def main(input: R.Tensor((4, 16, 32, 32), dtype="float32"), mean: R.Tensor((1, 16), dtype="float32"), + variance: R.Tensor((1, 16), dtype="float32"), offset: R.Tensor((1, 16), dtype="float32"), + scale: R.Tensor((1, 16), dtype="float32")) -> R.Tensor((4, 16, 32, 32), dtype="float32"): + R.func_attr({"num_input": 1}) + with R.dataflow(): + lv: R.Tensor((16,), dtype="float32") = R.squeeze(scale, axis=[0]) + lv1: R.Tensor((16,), dtype="float32") = R.squeeze(offset, axis=[0]) + lv2: R.Tensor((16,), dtype="float32") = R.squeeze(mean, axis=[0]) + lv3: R.Tensor((16,), dtype="float32") = R.squeeze(variance, axis=[0]) + lv4 = R.emit_te(topi.nn.batch_norm, input, lv, lv1, lv2, lv3, 1, 1e-3) + gv: R.Tensor((4, 16, 32, 32), dtype="float32") = lv4[0] + R.output(gv) + return gv + + binding = { + "mean": np.ones((1, 16), dtype="float32"), + "variance": np.ones((1, 16), dtype="float32"), + "offset": np.ones((1, 16), dtype="float32"), + "scale": np.ones((1, 16), dtype="float32") + } + verify_model_struct("batch_norm", binding, expected) diff --git a/tests/scripts/task_python_nightly.sh b/tests/scripts/task_python_nightly.sh index f8423602bd4b7..0536f9681623a 100755 --- a/tests/scripts/task_python_nightly.sh +++ b/tests/scripts/task_python_nightly.sh @@ -27,3 +27,6 @@ make cython3 find . -type f -path "*.pyc" | xargs rm -f run_pytest cython python-topi-nightly tests/python/topi/nightly + +echo "Running relax NNEF frontend execution test..." +run_pytest cython python-frontend-nnef tests/python/frontend/nnef
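For a quick check outside CI, the same suite can presumably be run directly with pytest; a minimal sketch, assuming a built TVM checkout on PYTHONPATH and the `nnef` Python parser package installed:

    # Hypothetical local invocation of the new NNEF frontend tests
    # (the run_pytest wrapper above is CI-specific).
    python -m pytest tests/python/frontend/nnef -v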