Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/12/01 16:56:38 UTC

[GitHub] [tvm] lhutton1 commented on a change in pull request #9576: [microNPU] Mean legalization support

lhutton1 commented on a change in pull request #9576:
URL: https://github.com/apache/tvm/pull/9576#discussion_r760379893



##########
File path: tests/python/contrib/test_ethosu/test_codegen.py
##########
@@ -514,6 +514,110 @@ def representative_dataset():
     infra.verify_source(compiled_models, accel_type)
 
 
+@pytest.mark.parametrize(
+    "accel_type",
+    ACCEL_TYPES,
+)
+@pytest.mark.parametrize(
+    "ifm_shape, axis, keep_dims, use_same_quantization",
+    [
+        # mean to depthwise + multiply
+        [(1, 8, 16, 16), (1, 2), True, False],
+        [(1, 3, 4), (0, 1), True, False],
+        [(1, 65, 2, 1), (1, 2), True, False],  # special case when h > 64
+        # mean to average pool
+        [(1, 8, 16, 16), (2,), False, True],
+        [(3, 3, 4), (0,), True, True],
+        [(8, 5), (0,), False, True],
+        # mean to depthwise
+        [(1, 8, 16, 16), (2,), True, False],
+        [(1, 8, 16, 16), (2, 1), False, False],
+        [(8, 4), (0,), False, False],
+    ],
+)
+def test_mean(accel_type, ifm_shape, axis, keep_dims, use_same_quantization):
+    dtype = "int8"
+
+    def create_mod_from_tflite():
+        class Model(tf.Module):
+            @tf.function
+            def tf_function(self, x):
+                op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)
+                return op
+
+        model = Model()
+        concrete_func = model.tf_function.get_concrete_function(
+            tf.TensorSpec(ifm_shape, dtype=tf.float32)
+        )
+
+        # Convert the model
+        def representative_dataset():
+            for _ in range(100):
+                data = np.random.rand(*tuple(ifm_shape))
+                yield [data.astype(np.float32)]
+
+        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
+        converter.optimizations = [tf.lite.Optimize.DEFAULT]
+        converter.representative_dataset = representative_dataset
+        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+        converter.inference_input_type = tf.int8
+        converter.inference_output_type = tf.int8
+        tflite_graph = converter.convert()
+        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
+
+        mod, _ = relay.frontend.from_tflite(
+            tflite_model,
+            shape_dict={"ifm": ifm_shape},
+            dtype_dict={"ifm": dtype},
+        )
+        input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
+        return mod, input_data, output_data
+
+    def create_mod_from_relay():
+        ifm = relay.var("input", shape=ifm_shape, dtype=dtype)
+        cast = relay.cast(ifm, dtype="int32")
+        mean = relay.mean(cast, axis=axis, keepdims=keep_dims)
+        requantize = relay.qnn.op.requantize(
+            mean,
+            input_scale=relay.const(1.0, dtype="float32"),
+            input_zero_point=relay.const(0, dtype="int32"),
+            output_scale=relay.const(1.0, dtype="float32"),
+            output_zero_point=relay.const(0, dtype="int32"),
+        )
+
+        func = relay.Function(relay.analysis.free_vars(requantize), requantize)
+        mod = tvm.IRModule.from_expr(func)
+
+        input_data = {"input": np.random.randint(low=-127, high=128, size=ifm_shape, dtype=dtype)}
+        output_data = generate_ref_data(mod, input_data)
+        return mod, input_data, output_data
+
+    mod, input_data, output_data = (
+        create_mod_from_relay() if use_same_quantization else create_mod_from_tflite()
+    )
+    mod = partition_for_ethosu(mod)
+
+    # TODO(lhutton1) For now the output is not bit exact with TFLite.
+    # This is because the TFLite reference kernels are not being used.
+    # Fixing this requires upgrading TFLite to 2.6.
+    compiled_models = infra.build_source(
+        mod, input_data, output_data, accel_type, output_tolerance=1
+    )
+
+    # Assumes only two runtime.Modules are created -- i.e. a single offload module
+    imported_modules = compiled_models[0].executor_factory.lib.imported_modules
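
The parametrization above groups the test cases by the strategy the mean
legalization is expected to pick. As a rough illustration of why those
rewrites are numerically valid (a minimal numpy sketch, not code from this
PR; the shape is taken from the first test case and all names are
illustrative):

    import numpy as np

    # Example NHWC input matching the (1, 8, 16, 16) test case above.
    ifm = np.random.randint(-128, 128, size=(1, 8, 16, 16)).astype(np.int32)

    # "mean to average pool": a mean over the width axis (axis=2) is an
    # average pool whose window spans that axis -- a windowed sum divided
    # by the window size.
    pool = ifm.sum(axis=2, keepdims=True) / ifm.shape[2]
    assert np.allclose(pool, ifm.mean(axis=2, keepdims=True))

    # "mean to depthwise (+ multiply)": a depthwise convolution with an
    # all-ones kernel computes a per-channel windowed sum; a multiply by
    # 1/N then turns that sum into the mean, and is also where a rescale
    # can be folded in when input and output quantization differ.
    dw_sum = ifm.sum(axis=(1, 2), keepdims=True)
    n = ifm.shape[1] * ifm.shape[2]
    assert np.allclose(dw_sum / n, ifm.mean(axis=(1, 2), keepdims=True))

Note that the cases above exercise the average-pool path only with
use_same_quantization=True, while the depthwise paths cover differing
input and output quantization.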

Review comment:
       Thanks @manupa-arm, I've just pushed an update :)

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org