Posted to commits@tvm.apache.org by lu...@apache.org on 2022/09/05 08:27:13 UTC

[tvm] branch main updated: [ETHOSN] Use pytest parameterization for integration tests (#12688)

This is an automated email from the ASF dual-hosted git repository.

lukhut pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 5dcf62288b [ETHOSN] Use pytest parameterization for integration tests (#12688)
5dcf62288b is described below

commit 5dcf62288b1d998df74ac36e48fcfe2424a0def8
Author: Luke Hutton <lu...@arm.com>
AuthorDate: Mon Sep 5 09:27:03 2022 +0100

    [ETHOSN] Use pytest parameterization for integration tests (#12688)
    
    Using pytest parameterization makes it possible to identify the particular parameter combinations that fail for a given test. It is also useful when parallelizing the tests. This commit replaces the remaining "trials" lists with parameterization and performs a general cleanup.
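    
    As a rough illustration of the pattern applied throughout (the test name and
    values below are illustrative only, not taken from this diff): instead of a
    single test looping over a hand-rolled "trials" list, each parameter
    combination becomes its own test case, so a failure pinpoints the exact
    combination and the cases can be distributed by tools such as pytest-xdist.
    
        import pytest
    
        # Stacked parametrize decorators generate the cross product of cases,
        # e.g. 2 dtypes x 2 shapes = 4 independently reported tests.
        @pytest.mark.parametrize("dtype", ["uint8", "int8"])
        @pytest.mark.parametrize("shape", [(1, 4, 4, 4), (1, 8, 8, 8)])
        def test_op(shape, dtype):
            # Stand-in for building and running the model and comparing outputs.
            assert len(shape) == 4 and dtype in ("uint8", "int8")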
---
 tests/python/contrib/test_ethosn/test_conv2d.py    | 399 +++++++++++----------
 .../contrib/test_ethosn/test_depth_to_space.py     |  59 ++-
 .../contrib/test_ethosn/test_fullyconnected.py     |  95 ++---
 tests/python/contrib/test_ethosn/test_pooling.py   |  77 ++--
 tests/python/contrib/test_ethosn/test_relu.py      |  71 ++--
 tests/python/contrib/test_ethosn/test_resize.py    |  42 +--
 tests/python/contrib/test_ethosn/test_sigmoid.py   |  82 ++---
 tests/python/contrib/test_ethosn/test_split.py     |  59 ++-
 .../python/contrib/test_ethosn/test_topologies.py  |  73 ++--
 9 files changed, 492 insertions(+), 465 deletions(-)

diff --git a/tests/python/contrib/test_ethosn/test_conv2d.py b/tests/python/contrib/test_ethosn/test_conv2d.py
index ffe66f0d2b..4026f8267d 100644
--- a/tests/python/contrib/test_ethosn/test_conv2d.py
+++ b/tests/python/contrib/test_ethosn/test_conv2d.py
@@ -18,11 +18,14 @@
 """Arm(R) Ethos(TM)-N integration conv2d tests"""
 
 import math
+
 import numpy as np
 import pytest
+
 import tvm
 from tvm import relay
 from tvm.testing import requires_ethosn
+
 from . import infrastructure as tei
 
 
@@ -99,12 +102,12 @@ def _get_model(
         padding=p if pad in ("attr", "both") else (0, 0, 0, 0),
         out_dtype="int32",
     )
-    b = tvm.nd.array(
+    bias_data = tvm.nd.array(
         np.random.randint(
             np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=(out_channels,), dtype="int32"
         )
     )
-    biasc = relay.const(b, "int32")
+    biasc = relay.const(bias_data, "int32")
     bias = relay.nn.bias_add(conv, biasc, axis=3)
     if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):
         req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]
@@ -118,209 +121,222 @@ def _get_model(
         relay.const(output_zp, "int32"),  # output zero point
         out_dtype=dtype,
     )
-    params = {"w": weights_array, "b": b}
+    params = {"w": weights_array, "b": bias_data}
     return req, params
 
 
 @requires_ethosn
-@pytest.mark.parametrize("depthwise", [False, True])
-@pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_conv2d(dtype, depthwise):
+@pytest.mark.parametrize(
+    "dtype,qnn_per_channel", [("uint8", False), ("int8", False), ("int8", True)]
+)
+@pytest.mark.parametrize("pad,stride", [("attr", (2, 2)), ("none", (2, 2)), ("op", (1, 1))])
+@pytest.mark.parametrize(
+    "shape,out_channels,kernel_size",
+    [
+        [(1, 17, 20, 26), 4, (3, 1)],
+        [(1, 9, 20, 30), 7, (1, 5)],
+        [(1, 21, 21, 22), 8, (2, 2)],
+    ],
+)
+def test_conv2d(
+    dtype,
+    shape,
+    out_channels,
+    kernel_size,
+    pad,
+    stride,
+    qnn_per_channel,
+):
     """Compare Conv2D output with TVM."""
-
-    trials = [
-        [(1, 17, 20, 26), 4, 3, 1, "attr", (2, 2), (1, 1), False],
-        [(1, 30, 27, 30), 5, 5, 3, "none", (1, 1), (1, 1), False],
-        [(1, 30, 27, 30), 5, 5, 3, "none", (1, 1), (1, 1), dtype == "int8"],
-        [(1, 14, 28, 11), 6, 2, 2, "op", (2, 2), (1, 1), False],
-        [(1, 9, 20, 30), 7, 1, 5, "none", (1, 1), (1, 1), False],
-        [(1, 21, 21, 22), 8, 5, 1, "attr", (2, 2), (1, 1), False],
-        [(1, 21, 21, 22), 8, 5, 1, "attr", (2, 2), (1, 1), dtype == "int8"],
-        [(1, 21, 25, 29), 9, 2, 5, "op", (1, 1), (1, 1), False],
-        [(1, 21, 25, 29), 9, 2, 5, "op", (1, 1), (1, 1), dtype == "int8"],
-        [(1, 31, 28, 15), 10, 1, 2, "attr", (2, 2), (1, 1), False],
-        [(1, 21, 21, 8), 11, 3, 3, "none", (1, 1), (1, 1), False],
-        [(1, 5, 11, 6), 12, 5, 2, "op", (2, 2), (1, 1), False],
-        [(1, 12, 7, 18), 13, 1, 3, "op", (1, 1), (1, 1), False],
-        [(1, 24, 6, 26), 14, 3, 5, "none", (2, 2), (1, 1), False],
-        [(1, 19, 24, 16), 15, 2, 1, "attr", (1, 1), (1, 1), False],
-    ]
-
     np.random.seed(0)
-    for shape, out_channels, kernel_h, kernel_w, pad, stride, dilation, qnn_per_channel in trials:
-        if depthwise:
-            out_channels = shape[3]
-            groups = out_channels
-            kernel_w = kernel_h
-            weight_format = "HWOI"
-            stride = (1, 1) if kernel_w == 1 else (2, 2)
-        else:
-            groups = 1
-            weight_format = "HWIO"
 
-        outputs = []
-        inputs = {
-            "a": tvm.nd.array(
-                np.random.randint(
-                    np.iinfo(dtype).min,
-                    np.iinfo(dtype).max + 1,
-                    size=shape,
-                    dtype=dtype,
-                )
-            ),
-        }
-        input_zp = np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
-        input_sc = np.random.random() * 2
-        if qnn_per_channel:
-            kernel_sc = tvm.nd.array(
-                np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)
+    dilation = (1, 1)
+    groups = 1
+    weight_format = "HWIO"
+
+    outputs = []
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(
+                np.iinfo(dtype).min,
+                np.iinfo(dtype).max + 1,
+                size=shape,
+                dtype=dtype,
             )
-        else:
-            kernel_sc = np.random.random() * 2
-        kernel_zp = (
-            0 if dtype == "int8" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
-        )
-        output_zp, output_sc = tei.get_conv2d_qnn_params(
-            dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_h, kernel_w, shape[3]
-        )
-        model, params = _get_model(
-            shape,
-            kernel_h,
-            kernel_w,
-            input_zp,
-            input_sc,
-            kernel_zp,
-            kernel_sc,
-            output_zp,
-            output_sc,
-            pad,
-            stride,
-            dilation,
-            groups,
-            dtype,
-            out_channels,
-            weight_format,
+        ),
+    }
+    input_zp = np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
+    input_sc = np.random.random() * 2
+    if qnn_per_channel:
+        kernel_sc = tvm.nd.array(
+            np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)
         )
-        for npu in [False, True]:
-            mod = tei.make_module(model, params)
-            outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
+    else:
+        kernel_sc = np.random.random() * 2
+    kernel_zp = (
+        0 if dtype == "int8" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
+    )
+    output_zp, output_sc = tei.get_conv2d_qnn_params(
+        dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_size[0], kernel_size[1], shape[3]
+    )
+    model, params = _get_model(
+        shape,
+        kernel_size[0],
+        kernel_size[1],
+        input_zp,
+        input_sc,
+        kernel_zp,
+        kernel_sc,
+        output_zp,
+        output_sc,
+        pad,
+        stride,
+        dilation,
+        groups,
+        dtype,
+        out_channels,
+        weight_format,
+    )
+    for npu in [False, True]:
+        mod = tei.make_module(model, params)
+        outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
 
-        tei.verify(outputs, dtype, 1)
+    tei.verify(outputs, dtype, 1)
 
 
 @requires_ethosn
-def test_conv2d_failure():
-    """Check Conv2D error messages."""
+@pytest.mark.parametrize(
+    "dtype,qnn_per_channel", [("uint8", False), ("int8", False), ("int8", True)]
+)
+@pytest.mark.parametrize("pad,stride", [("attr", (2, 2)), ("none", (2, 2)), ("op", (1, 1))])
+@pytest.mark.parametrize(
+    "shape,kernel_size",
+    [
+        [(1, 17, 20, 28), (3, 3)],
+        [(1, 9, 20, 30), (5, 5)],
+        [(1, 21, 21, 22), (2, 2)],
+    ],
+)
+def test_conv2d_depthwise(
+    dtype,
+    shape,
+    kernel_size,
+    pad,
+    stride,
+    qnn_per_channel,
+):
+    """Compare Conv2D output with TVM."""
+    np.random.seed(0)
 
-    trials = [
-        (
-            (1, 4, 4, 4),
-            1,
-            1,
-            0,
-            1024,
-            0,
-            1024,
-            0,
-            1,
-            "none",
-            (1, 1),
-            (1, 1),
-            1,
-            "uint8",
-            8,
-            "HWIO",
-            "Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)",
+    dilation = (1, 1)
+    out_channels = shape[3]
+    groups = out_channels
+    weight_format = "HWOI"
+
+    outputs = []
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(
+                np.iinfo(dtype).min,
+                np.iinfo(dtype).max + 1,
+                size=shape,
+                dtype=dtype,
+            )
         ),
+    }
+    input_zp = np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
+    input_sc = np.random.random() * 2
+    if qnn_per_channel:
+        kernel_sc = tvm.nd.array(
+            np.random.uniform(low=0, high=2, size=(out_channels,)).astype(np.float32)
+        )
+    else:
+        kernel_sc = np.random.random() * 2
+    kernel_zp = (
+        0 if dtype == "int8" else np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max)
+    )
+    output_zp, output_sc = tei.get_conv2d_qnn_params(
+        dtype, input_zp, input_sc, kernel_zp, kernel_sc, kernel_size[0], kernel_size[1], shape[3]
+    )
+    model, params = _get_model(
+        shape,
+        kernel_size[0],
+        kernel_size[1],
+        input_zp,
+        input_sc,
+        kernel_zp,
+        kernel_sc,
+        output_zp,
+        output_sc,
+        pad,
+        stride,
+        dilation,
+        groups,
+        dtype,
+        out_channels,
+        weight_format,
+    )
+    for npu in [False, True]:
+        mod = tei.make_module(model, params)
+        outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))
+
+    tei.verify(outputs, dtype, 1)
+
+
+@requires_ethosn
+@pytest.mark.parametrize(
+    "shape,pad,stride,dilation,err_msg",
+    [
         (
             (1, 4, 4, 4),
-            2,
-            2,
-            0,
-            1,
-            0,
-            1,
-            0,
-            2,
             "both",
             (1, 1),
             (1, 1),
-            1,
-            "uint8",
-            8,
-            "HWIO",
             "both op and attr padding exist, must be either op/attr only or no padding",
         ),
         (
             (1, 4, 4, 4),
-            1,
-            1,
-            0,
-            1,
-            0,
-            1,
-            0,
-            2,
             "none",
             (1, 1, 1),
             (1, 1),
-            1,
-            "uint8",
-            8,
-            "HWIO",
             "stride size=3, stride size must = 2",
         ),
         (
             (1, 4, 4, 4),
-            1,
-            1,
-            0,
-            1,
-            0,
-            1,
-            0,
-            2,
             "none",
             (1, 1),
             (2, 1),
-            1,
-            "uint8",
-            8,
-            "HWIO",
             "dilation=[2, 1], dilation must = [1, 1]",
         ),
         (
             (2, 4, 4, 4),
-            1,
-            1,
-            0,
-            1,
-            0,
-            1,
-            0,
-            2,
             "none",
             (1, 1),
             (1, 1),
-            1,
-            "uint8",
-            8,
-            "HWIO",
             "batch size=2, batch size must = 1",
         ),
-    ]
-
+    ],
+)
+def test_conv2d_failure(shape, pad, stride, dilation, err_msg):
+    """Check Conv2D error messages."""
     np.random.seed(0)
-    for (
+
+    kernel_size = (2, 2)
+    groups = 1
+    dtype = "uint8"
+    out_channels = 8
+    weight_format = "HWIO"
+
+    model, _ = _get_model(
         shape,
-        kernel_h,
-        kernel_w,
-        input_zp,
-        input_sc,
-        kernel_zp,
-        kernel_sc,
-        output_zp,
-        output_sc,
+        kernel_size[0],
+        kernel_size[1],
+        0,
+        1,
+        0,
+        1,
+        0,
+        1,
         pad,
         stride,
         dilation,
@@ -328,26 +344,43 @@ def test_conv2d_failure():
         dtype,
         out_channels,
         weight_format,
-        err_msg,
-    ) in trials:
-        model, _ = _get_model(
-            shape,
-            kernel_h,
-            kernel_w,
-            input_zp,
-            input_sc,
-            kernel_zp,
-            kernel_sc,
-            output_zp,
-            output_sc,
-            pad,
-            stride,
-            dilation,
-            groups,
-            dtype,
-            out_channels,
-            weight_format,
-        )
-        model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d")
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+    )
+    model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d")
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
+
+
+@requires_ethosn
+def test_conv2d_out_of_range_scale():
+    """Check Conv2D scale out of range error."""
+    np.random.seed(0)
+
+    input_sc = 1024
+    kernel_sc = 1024
+    output_sc = 1
+
+    model, _ = _get_model(
+        (1, 4, 4, 4),
+        1,
+        1,
+        0,
+        input_sc,
+        0,
+        kernel_sc,
+        0,
+        output_sc,
+        "none",
+        (1, 1),
+        (1, 1),
+        1,
+        "uint8",
+        8,
+        "HWIO",
+    )
+    model = tei.make_ethosn_composite(model, "ethos-n.qnn_conv2d")
+    mod = tei.make_ethosn_partition(model)
+
+    expected_err_msg = (
+        "Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)"
+    )
+    tei.test_error(mod, {}, expected_err_msg)
diff --git a/tests/python/contrib/test_ethosn/test_depth_to_space.py b/tests/python/contrib/test_ethosn/test_depth_to_space.py
index c071fe00f2..732932d8f3 100644
--- a/tests/python/contrib/test_ethosn/test_depth_to_space.py
+++ b/tests/python/contrib/test_ethosn/test_depth_to_space.py
@@ -33,37 +33,35 @@ def _get_model(shape, block, dtype, layout):
 
 @requires_ethosn
 @pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_depth_to_space(dtype):
-    """Compare Depth To Space output with TVM."""
-
-    trials = [
+@pytest.mark.parametrize(
+    "shape",
+    [
         (1, 16, 16, 16),
         (1, 64, 32, 16),
-    ]
-
+    ],
+)
+def test_depth_to_space(dtype, shape):
+    """Compare Depth To Space output with TVM."""
     np.random.seed(0)
-    for shape in trials:
-        inputs = {
-            "a": tvm.nd.array(
-                np.random.randint(
-                    np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype
-                )
-            )
-        }
-        outputs = []
-        for npu in [False, True]:
-            model = _get_model(shape, 2, dtype, "NHWC")
-            mod = tei.make_module(model, {})
-            outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
 
-        tei.verify(outputs, dtype, 1)
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
+        )
+    }
+    outputs = []
+    for npu in [False, True]:
+        model = _get_model(shape, 2, dtype, "NHWC")
+        mod = tei.make_module(model, {})
+        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
 
+    tei.verify(outputs, dtype, 1)
 
-@requires_ethosn
-def test_depth_to_space_failure():
-    """Check Depth To Space error messages."""
 
-    trials = [
+@requires_ethosn
+@pytest.mark.parametrize(
+    "shape,block,dtype,layout,err_msg",
+    [
         ((2, 16, 16, 16), 2, "uint8", "NHWC", "batch size=2, batch size must = 1"),
         (
             (1, 16, 16, 16),
@@ -74,9 +72,10 @@ def test_depth_to_space_failure():
         ),
         ((1, 16, 16, 16), 4, "uint8", "NHWC", "Only block size of 2 is supported"),
         ((1, 16, 16, 16), 2, "uint8", "NCHW", "Input layer must be NHWC or NHWCB"),
-    ]
-
-    for shape, block, dtype, layout, err_msg in trials:
-        model = _get_model(shape, block, dtype, layout)
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+    ],
+)
+def test_depth_to_space_failure(shape, block, dtype, layout, err_msg):
+    """Check Depth To Space error messages."""
+    model = _get_model(shape, block, dtype, layout)
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
diff --git a/tests/python/contrib/test_ethosn/test_fullyconnected.py b/tests/python/contrib/test_ethosn/test_fullyconnected.py
index d5510bb79d..d38b2528c7 100644
--- a/tests/python/contrib/test_ethosn/test_fullyconnected.py
+++ b/tests/python/contrib/test_ethosn/test_fullyconnected.py
@@ -114,62 +114,63 @@ def test_fullyconnected(shape, out_channels, dtype, input_zp, input_sc, kernel_z
 
 
 @requires_ethosn
-def test_fullyconnected_failure():
-    """Check Fully Connected error messages."""
-
-    trials = [
-        (
-            (1, 64),
-            (1, 64),
-            0,
-            1024,
-            0,
-            1024,
-            0,
-            1,
-            "uint8",
-            "Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)",
-        ),
+@pytest.mark.parametrize(
+    "shape,weight_shape,err_msg",
+    [
         (
             (1, 1, 1, 64),
             (1, 64),
-            0,
-            1,
-            0,
-            1,
-            0,
-            1,
-            "uint8",
             "Weights tensor must have I dimension equal to the number"
             " of channels of the input tensor.;",
         ),
-        ((1024, 64), (1, 64), 0, 1, 0, 1, 0, 1, "uint8", "batch size=1024, batch size must = 1;"),
-    ]
-
+        ((1024, 64), (1, 64), "batch size=1024, batch size must = 1;"),
+    ],
+)
+def test_fullyconnected_failure(shape, weight_shape, err_msg):
+    """Check Fully Connected error messages."""
     np.random.seed(0)
-    for (
+
+    dtype = "uint8"
+
+    model, _ = _get_model(
         shape,
         weight_shape,
-        input_zp,
+        0,
+        1,
+        0,
+        1,
+        0,
+        1,
+        dtype,
+    )
+    model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
+
+
+@requires_ethosn
+def test_fullyconnected_scale_out_of_range():
+    """Check Fully Connected out of range scale error message."""
+    np.random.seed(0)
+
+    input_sc = 1024
+    kernel_sc = 1024
+    output_sc = 1
+
+    model, _ = _get_model(
+        (1, 64),
+        (1, 64),
+        0,
         input_sc,
-        kernel_zp,
+        0,
         kernel_sc,
-        output_zp,
+        0,
         output_sc,
-        dtype,
-        err_msg,
-    ) in trials:
-        model, _ = _get_model(
-            shape,
-            weight_shape,
-            input_zp,
-            input_sc,
-            kernel_zp,
-            kernel_sc,
-            output_zp,
-            output_sc,
-            dtype,
-        )
-        model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+        "uint8",
+    )
+    model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
+    mod = tei.make_ethosn_partition(model)
+    expected_error_msg = (
+        "Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)"
+    )
+    tei.test_error(mod, {}, expected_error_msg)
diff --git a/tests/python/contrib/test_ethosn/test_pooling.py b/tests/python/contrib/test_ethosn/test_pooling.py
index e1c7358f71..1e0487d767 100644
--- a/tests/python/contrib/test_ethosn/test_pooling.py
+++ b/tests/python/contrib/test_ethosn/test_pooling.py
@@ -38,91 +38,88 @@ def _get_model(shape, typef, sizes, strides, pads, layout, dtype):
 
 @requires_ethosn
 @pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_pooling(dtype):
+@pytest.mark.parametrize(
+    "shape,typef,size,stride,pad",
+    [
+        ((1, 8, 8, 8), relay.nn.max_pool2d, (2, 2), (2, 2), (0, 0, 0, 0)),
+        ((1, 9, 9, 9), relay.nn.max_pool2d, (3, 3), (2, 2), (0, 0, 0, 0)),
+        ((1, 8, 8, 8), relay.nn.avg_pool2d, (3, 3), (1, 1), (1, 1, 1, 1)),
+    ],
+)
+def test_pooling(dtype, shape, typef, size, stride, pad):
     """Compare Pooling output with TVM."""
+    np.random.seed(0)
 
-    trials = [
-        ((1, 8, 8, 8), relay.nn.max_pool2d, (2, 2), (2, 2), (0, 0, 0, 0), "NHWC"),
-        ((1, 9, 9, 9), relay.nn.max_pool2d, (3, 3), (2, 2), (0, 0, 0, 0), "NHWC"),
-        ((1, 8, 8, 8), relay.nn.avg_pool2d, (3, 3), (1, 1), (1, 1, 1, 1), "NHWC"),
-    ]
+    layout = "NHWC"
 
-    np.random.seed(0)
-    for shape, typef, size, stride, pad, layout in trials:
-        inputs = {
-            "a": tvm.nd.array(
-                np.random.randint(
-                    low=np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=shape, dtype=dtype
-                )
-            ),
-        }
-        outputs = []
-        model = _get_model(shape, typef, size, stride, pad, layout, dtype)
-        for npu in [False, True]:
-            mod = tei.make_module(model, {})
-            outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(
+                low=np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=shape, dtype=dtype
+            )
+        ),
+    }
+    outputs = []
+    model = _get_model(shape, typef, size, stride, pad, layout, dtype)
+    for npu in [False, True]:
+        mod = tei.make_module(model, {})
+        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
 
-        tei.verify(outputs, dtype, 1)
+    tei.verify(outputs, dtype, 1)
 
 
 @requires_ethosn
-def test_pooling_failure():
-    """Check Pooling error messages."""
-
-    trials = [
+@pytest.mark.parametrize(
+    "shape,size,stride,layout,dtype,err_msg",
+    [
         (
             (2, 8, 8, 8),
-            relay.nn.max_pool2d,
             (2, 2),
             (2, 2),
-            (0, 0, 0, 0),
             "NHWC",
             "uint8",
             "batch size=2, batch size must = 1",
         ),
         (
             (1, 8, 8, 8),
-            relay.nn.max_pool2d,
             (2, 2),
             (2, 2),
-            (0, 0, 0, 0),
             "NHWC",
             "int16",
             "dtype='int16', dtype must be either uint8, int8 or int32",
         ),
         (
             (1, 8, 8, 8),
-            relay.nn.max_pool2d,
             (2, 2),
             (2, 2),
-            (0, 0, 0, 0),
             "NCHW",
             "uint8",
             "data format=NCHW, data format must = NHWC",
         ),
         (
             (1, 8, 8, 8),
-            relay.nn.max_pool2d,
             (2, 2),
             (2, 2, 2),
-            (0, 0, 0, 0),
             "NHWC",
             "uint8",
             "stride size=3, stride size must = 2",
         ),
         (
             (1, 8, 8, 8),
-            relay.nn.max_pool2d,
             (2, 2, 2),
             (2, 2),
-            (0, 0, 0, 0),
             "NHWC",
             "uint8",
             "dimensions=3, dimensions must = 2",
         ),
-    ]
+    ],
+)
+def test_pooling_failure(shape, size, stride, layout, dtype, err_msg):
+    """Check Pooling error messages."""
+
+    typef = relay.nn.max_pool2d
+    pad = (0, 0, 0, 0)
 
-    for shape, typef, size, stride, pad, layout, dtype, err_msg in trials:
-        model = _get_model(shape, typef, size, stride, pad, layout, dtype)
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+    model = _get_model(shape, typef, size, stride, pad, layout, dtype)
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
diff --git a/tests/python/contrib/test_ethosn/test_relu.py b/tests/python/contrib/test_ethosn/test_relu.py
index f56a1cd7ad..db1894931d 100644
--- a/tests/python/contrib/test_ethosn/test_relu.py
+++ b/tests/python/contrib/test_ethosn/test_relu.py
@@ -33,53 +33,50 @@ def _get_model(shape, dtype, a_min, a_max):
 
 
 @requires_ethosn
-@pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_relu(dtype):
-    """Compare Relu output with TVM."""
-
-    trials = [
+@pytest.mark.parametrize(
+    "shape,a_min,a_max,dtype",
+    [
         ((1, 4, 4, 4), 65, 178, "uint8"),
         ((1, 8, 4, 2), 1, 254, "uint8"),
-        ((1, 16), 12, 76, "uint8"),
-        ((1, 4, 4, 4), 65, 125, "int8"),
         ((1, 8, 4, 2), -100, 100, "int8"),
         ((1, 16), -120, -20, "int8"),
-    ]
-
+    ],
+)
+def test_relu(dtype, shape, a_min, a_max):
+    """Compare Relu output with TVM."""
     np.random.seed(0)
-    for shape, a_min, a_max, trial_dtype in trials:
-        if trial_dtype == dtype:
-            inputs = {
-                "a": tvm.nd.array(
-                    np.random.randint(
-                        low=np.iinfo(dtype).min,
-                        high=np.iinfo(dtype).max + 1,
-                        size=shape,
-                        dtype=dtype,
-                    )
-                ),
-            }
-            outputs = []
-            for npu in [False, True]:
-                model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
-                mod = tei.make_module(model, {})
-                outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
 
-            tei.verify(outputs, dtype, 1)
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(
+                low=np.iinfo(dtype).min,
+                high=np.iinfo(dtype).max + 1,
+                size=shape,
+                dtype=dtype,
+            )
+        ),
+    }
+    outputs = []
+    for npu in [False, True]:
+        model = _get_model(inputs["a"].shape, dtype, a_min, a_max)
+        mod = tei.make_module(model, {})
+        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
 
+    tei.verify(outputs, dtype, 1)
 
-@requires_ethosn
-def test_relu_failure():
-    """Check Relu error messages."""
 
-    trials = [
+@requires_ethosn
+@pytest.mark.parametrize(
+    "shape,dtype,a_min,a_max,err_msg",
+    [
         ((1, 4, 4, 4, 4), "uint8", 65, 78, "dimensions=5, dimensions must be <= 4"),
         ((1, 8, 4, 2), "int16", 1, 254, "dtype='int16', dtype must be either uint8, int8 or int32"),
         ((1, 8, 4, 2), "uint8", 254, 1, "Relu has lower bound > upper bound"),
         ((2, 2, 2, 2), "uint8", 1, 63, "batch size=2, batch size must = 1; "),
-    ]
-
-    for shape, dtype, a_min, a_max, err_msg in trials:
-        model = _get_model(shape, dtype, a_min, a_max)
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+    ],
+)
+def test_relu_failure(shape, dtype, a_min, a_max, err_msg):
+    """Check Relu error messages."""
+    model = _get_model(shape, dtype, a_min, a_max)
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
diff --git a/tests/python/contrib/test_ethosn/test_resize.py b/tests/python/contrib/test_ethosn/test_resize.py
index 2cc641e63b..b437ad1e54 100644
--- a/tests/python/contrib/test_ethosn/test_resize.py
+++ b/tests/python/contrib/test_ethosn/test_resize.py
@@ -97,10 +97,9 @@ def test_resize(dtype, shape, size, coordinate_transformation_mode, rounding_met
 
 
 @requires_ethosn
-def test_resize_failure():
-    """Check Resize error messages."""
-
-    trials = [
+@pytest.mark.parametrize(
+    "size,err_msg",
+    [
         (
             (30, 20),
             "Requested height isn't supported",
@@ -117,22 +116,25 @@ def test_resize_failure():
             (20, 19),
             "Requested width and height must be both even or both odd",
         ),
-    ]
+    ],
+)
+def test_resize_failure(size, err_msg):
+    """Check Resize error messages."""
+
     dtype = "int8"
     zp_min = np.iinfo(dtype).min
 
-    for size, err_msg in trials:
-        model = _get_model(
-            shape=(1, 10, 10, 1),
-            dtype=dtype,
-            size=size,
-            input_zp=zp_min + 128,
-            input_sc=0.0784314,
-            output_zp=zp_min + 128,
-            output_sc=0.0784314,
-            coordinate_transformation_mode="half_pixel",
-            rounding_method="round_prefer_ceil",
-        )
-        model = tei.make_ethosn_composite(model, "ethos-n.qnn_resize")
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+    model = _get_model(
+        shape=(1, 10, 10, 1),
+        dtype=dtype,
+        size=size,
+        input_zp=zp_min + 128,
+        input_sc=0.0784314,
+        output_zp=zp_min + 128,
+        output_sc=0.0784314,
+        coordinate_transformation_mode="half_pixel",
+        rounding_method="round_prefer_ceil",
+    )
+    model = tei.make_ethosn_composite(model, "ethos-n.qnn_resize")
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
diff --git a/tests/python/contrib/test_ethosn/test_sigmoid.py b/tests/python/contrib/test_ethosn/test_sigmoid.py
index ae8c301ff0..bddd160491 100644
--- a/tests/python/contrib/test_ethosn/test_sigmoid.py
+++ b/tests/python/contrib/test_ethosn/test_sigmoid.py
@@ -44,59 +44,59 @@ def _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype):
 
 @requires_ethosn
 @pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_sigmoid(dtype):
-    """Compare Sigmoid output with TVM."""
-
-    trials = [
+@pytest.mark.parametrize(
+    "shape",
+    [
         (1, 16, 16, 16),
         (1, 8, 8),
-    ]
-
+    ],
+)
+def test_sigmoid(dtype, shape):
+    """Compare Sigmoid output with TVM."""
     np.random.seed(0)
-    for shape in trials:
-        inputs = {
-            "a": tvm.nd.array(
-                np.random.randint(
-                    np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype
-                )
-            ),
-        }
-        outputs = []
-        for npu in [False, True]:
-            for _ in range(1, 2):
-                if dtype == "uint8":
-                    input_zp = 0
-                    output_zp = 0
-                else:
-                    input_zp = 127
-                    output_zp = -128
-                model = _get_model(shape, input_zp, 0.02, output_zp, 1.0 / 256.0, dtype)
-                mod = tei.make_module(model, [])
-                outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
 
-        tei.verify(outputs, dtype, 1)
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
+        ),
+    }
+    outputs = []
+    for npu in [False, True]:
+        for _ in range(1, 2):
+            if dtype == "uint8":
+                input_zp = 0
+                output_zp = 0
+            else:
+                input_zp = 127
+                output_zp = -128
+            model = _get_model(shape, input_zp, 0.02, output_zp, 1.0 / 256.0, dtype)
+            mod = tei.make_module(model, [])
+            outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))
 
+    tei.verify(outputs, dtype, 1)
 
-@requires_ethosn
-@pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_sigmoid_failure(dtype):
-    """Check Sigmoid error messages."""
 
-    test_zp = 0 if dtype == "uint8" else -128
-    trials = [
-        ((2, 4, 4, 4), 64, 0.2, test_zp, 1 / 256, "batch size=2, batch size must = 1"),
+@requires_ethosn
+@pytest.mark.parametrize(
+    "shape,input_zp,input_sc,output_zp,output_sc,err_msg",
+    [
+        ((2, 4, 4, 4), 64, 0.2, 0, 1 / 256, "batch size=2, batch size must = 1"),
         (
             (1, 4, 4, 4),
             64,
             0.2,
             3,
             1,
-            f"output quantization params=(3, 1), must = ({test_zp}, 1/256)",
+            "output quantization params=(3, 1), must = (0, 1/256)",
         ),
-    ]
+    ],
+)
+def test_sigmoid_failure(shape, input_zp, input_sc, output_zp, output_sc, err_msg):
+    """Check Sigmoid error messages."""
+
+    dtype = "uint8"
 
-    for shape, input_zp, input_sc, output_zp, output_sc, err_msg in trials:
-        model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype)
-        model = tei.make_ethosn_composite(model, "ethos-n.qnn_sigmoid")
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+    model = _get_model(shape, input_zp, input_sc, output_zp, output_sc, dtype)
+    model = tei.make_ethosn_composite(model, "ethos-n.qnn_sigmoid")
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
diff --git a/tests/python/contrib/test_ethosn/test_split.py b/tests/python/contrib/test_ethosn/test_split.py
index 7f8787afe9..afbc45a080 100644
--- a/tests/python/contrib/test_ethosn/test_split.py
+++ b/tests/python/contrib/test_ethosn/test_split.py
@@ -36,39 +36,37 @@ def _get_model(shape, dtype, splits, axis):
 @pytest.mark.skip("Split is not supported by the 3.0.1 version of the driver stack.")
 @requires_ethosn
 @pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_split(dtype):
-    """Compare Split output with TVM."""
-
-    trials = [
+@pytest.mark.parametrize(
+    "shape,splits,axis",
+    [
         ((1, 16, 16, 32), (2, 7, 10), 2),
         ((1, 12, 8, 16), 3, 1),
-    ]
-
+    ],
+)
+def test_split(dtype, shape, splits, axis):
+    """Compare Split output with TVM."""
     np.random.seed(0)
-    for shape, splits, axis in trials:
-        outputs = []
-        inputs = {
-            "a": tvm.nd.array(
-                np.random.randint(
-                    np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype
-                )
-            )
-        }
-        for npu in [False, True]:
-            model = _get_model(shape, dtype, splits, axis)
-            mod = tei.make_module(model, {})
-            output_count = splits if isinstance(splits, int) else len(splits) + 1
-            outputs.append(tei.build_and_run(mod, inputs, output_count, {}, npu=npu))
+
+    outputs = []
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
+        )
+    }
+    for npu in [False, True]:
+        model = _get_model(shape, dtype, splits, axis)
+        mod = tei.make_module(model, {})
+        output_count = splits if isinstance(splits, int) else len(splits) + 1
+        outputs.append(tei.build_and_run(mod, inputs, output_count, {}, npu=npu))
 
         tei.verify(outputs, dtype, 0)
 
 
 @pytest.mark.skip("Split is not supported by the 3.0.1 version of the driver stack.")
 @requires_ethosn
-def test_split_failure():
-    """Check Split error messages."""
-
-    trials = [
+@pytest.mark.parametrize(
+    "shape,dtype,splits,axis,err_msg",
+    [
         ((1, 4, 4, 4, 4), "uint8", 4, 2, "dimensions=5, dimensions must be <= 4;"),
         ((1, 4, 4, 4), "int16", 4, 2, "dtype='int16', dtype must be either uint8, int8 or int32;"),
         ((2, 4, 4, 4), "uint8", 4, 2, "batch size=2, batch size must = 1;"),
@@ -81,9 +79,10 @@ def test_split_failure():
             "Split along the channels dimension (axis 3) requires all output sizes "
             "(specified in splitInfo.m_Sizes) to be multiples of 16;",
         ),
-    ]
-
-    for shape, dtype, splits, axis, err_msg in trials:
-        model = _get_model(shape, dtype, splits, axis)
-        mod = tei.make_ethosn_partition(model)
-        tei.test_error(mod, {}, err_msg)
+    ],
+)
+def test_split_failure(shape, dtype, splits, axis, err_msg):
+    """Check Split error messages."""
+    model = _get_model(shape, dtype, splits, axis)
+    mod = tei.make_ethosn_partition(model)
+    tei.test_error(mod, {}, err_msg)
diff --git a/tests/python/contrib/test_ethosn/test_topologies.py b/tests/python/contrib/test_ethosn/test_topologies.py
index 19d7accadb..dc6a2ed086 100644
--- a/tests/python/contrib/test_ethosn/test_topologies.py
+++ b/tests/python/contrib/test_ethosn/test_topologies.py
@@ -237,8 +237,15 @@ def test_output_order_different_sizes(dtype):
 
 @requires_ethosn
 @pytest.mark.parametrize("dtype", ["uint8", "int8"])
-def test_split_with_asym_concats(dtype):
+@pytest.mark.parametrize(
+    "shape,splits,axis",
+    [
+        ((1, 16, 16, 32), (2, 7, 10), 2),
+    ],
+)
+def test_split_with_asym_concats(dtype, shape, splits, axis):
     """Test a model with split and contatenates."""
+    np.random.seed(0)
 
     def get_model(shape, dtype, splits, axis):
         a = relay.var("a", shape=shape, dtype=dtype)
@@ -263,51 +270,43 @@ def test_split_with_asym_concats(dtype):
         )
         return relay.Tuple((con2, con1))
 
-    trials = [
-        ((1, 16, 16, 32), (2, 7, 10), 2),
-    ]
-
-    np.random.seed(0)
-    for shape, splits, axis in trials:
-        outputs = []
-        inputs = {
-            "a": tvm.nd.array(
-                np.random.randint(
-                    np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype
-                )
-            )
-        }
-        for npu in [False, True]:
-            model = get_model(shape, dtype, splits, axis)
-            mod = tei.make_module(model, {})
+    outputs = []
+    inputs = {
+        "a": tvm.nd.array(
+            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
+        )
+    }
+    for npu in [False, True]:
+        model = get_model(shape, dtype, splits, axis)
+        mod = tei.make_module(model, {})
 
-            expected_host_ops = 1
-            npu_partitions = 2
+        expected_host_ops = 1
+        npu_partitions = 2
 
-            # Mock inference is only supported when the whole graph is offloaded to the NPU
-            if ethosn_available() == Available.SW_ONLY:
-                tei.build(
+        # Mock inference is only supported when the whole graph is offloaded to the NPU
+        if ethosn_available() == Available.SW_ONLY:
+            tei.build(
+                mod,
+                {},
+                npu=npu,
+                expected_host_ops=expected_host_ops,
+                npu_partitions=npu_partitions,
+            )
+        else:
+            outputs.append(
+                tei.build_and_run(
                     mod,
+                    inputs,
+                    2,
                     {},
                     npu=npu,
                     expected_host_ops=expected_host_ops,
                     npu_partitions=npu_partitions,
                 )
-            else:
-                outputs.append(
-                    tei.build_and_run(
-                        mod,
-                        inputs,
-                        2,
-                        {},
-                        npu=npu,
-                        expected_host_ops=expected_host_ops,
-                        npu_partitions=npu_partitions,
-                    )
-                )
+            )
 
-        if outputs:
-            tei.verify(outputs, dtype, 0)
+    if outputs:
+        tei.verify(outputs, dtype, 0)
 
 
 @pytest.mark.skip("Split is not supported by the 3.0.1 version of the driver stack.")