Posted to commits@tvm.apache.org by me...@apache.org on 2022/09/12 20:31:59 UTC

[tvm] branch main updated: [Hexagon] Validate 2-d physical shapes for TIR-derived schedules (#12662)

This is an automated email from the ASF dual-hosted git repository.

mehrdadh pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 9671aee942 [Hexagon] Validate 2-d physical shapes for TIR-derived schedules (#12662)
9671aee942 is described below

commit 9671aee942503815ad2a586406eef11391287ee5
Author: Eric Lunderberg <Lu...@users.noreply.github.com>
AuthorDate: Mon Sep 12 15:31:52 2022 -0500

    [Hexagon] Validate 2-d physical shapes for TIR-derived schedules (#12662)
    
    Previously, the test cases only tested TE-based schedules.  This
    commit runs the same tests for equivalent TIR-based schedules as
    well.  This is intended to catch Hexagon-specific regressions, such as
    the one resolved in https://github.com/apache/tvm/pull/12652.
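
For readers skimming the diff below: the new `schedule_type = tvm.testing.parameter("TE", "TIR")` line is what fans every test in the class out over both schedule types. A minimal, hypothetical sketch of the same mechanism (the test name and standalone layout here are illustrative, not part of the commit):

import tvm.testing

# Each listed value becomes a separate pytest parametrization for any
# test or fixture that requests "schedule_type" by argument name.
schedule_type = tvm.testing.parameter("TE", "TIR")


def test_runs_per_schedule_type(schedule_type):
    # Collected twice: once with "TE", once with "TIR".
    assert schedule_type in ("TE", "TIR")


if __name__ == "__main__":
    tvm.testing.main()
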
---
 .../test_hexagon/test_2d_physical_buffers.py       | 59 +++++++++++++++++++++-
 1 file changed, 58 insertions(+), 1 deletion(-)

diff --git a/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py b/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py
old mode 100644
new mode 100755
index cebb36edc3..cba6ddc443
--- a/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py
+++ b/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py
@@ -41,6 +41,8 @@ from .infrastructure import allocate_hexagon_array
 # there as well
 # pylint: disable=invalid-name
 
+schedule_type = tvm.testing.parameter("TE", "TIR")
+
 dtype = tvm.testing.parameter("int8")
 batch_size = tvm.testing.parameter(
     16,
@@ -198,6 +200,7 @@ class TestElementWise:
     @tvm.testing.fixture
     def schedule_args(
         self,
+        schedule_type,
         input_shape,
         dtype,
         input_layout,
@@ -206,12 +209,39 @@ class TestElementWise:
         working_scope,
     ):
         """Create and return the schedule and input args after applying layout transform"""
+        if schedule_type == "TE":
+
+            return self._te_schedule_args(
+                input_shape, dtype, input_layout, output_layout, working_layout, working_scope
+            )
+        elif schedule_type == "TIR":
+            return self._tir_schedule_args(
+                input_shape, dtype, input_layout, output_layout, working_layout, working_scope
+            )
+
+        else:
+            raise ValueError(f"Unknown schedule type: {schedule_type}")
+
+    def _te_tensors(self, input_shape, dtype):
         input_tensor = te.placeholder(input_shape, dtype, name="Input")
         output_tensor = te.compute(
             shape=input_tensor.shape,
             fcompute=lambda *indices: (2 * input_tensor[indices]).astype(dtype),
             name="Output",
         )
+        return input_tensor, output_tensor
+
+    def _te_schedule_args(
+        self,
+        input_shape,
+        dtype,
+        input_layout,
+        output_layout,
+        working_layout,
+        working_scope,
+    ):
+        input_tensor, output_tensor = self._te_tensors(input_shape, dtype)
+
         schedule = te.create_schedule(output_tensor.op)
 
         write_cache = schedule.cache_write(output_tensor, working_scope)
@@ -235,6 +265,33 @@ class TestElementWise:
 
         return [schedule, [input_tensor, output_tensor]]
 
+    def _tir_schedule_args(
+        self, input_shape, dtype, input_layout, output_layout, working_layout, working_scope
+    ):
+        tensors = self._te_tensors(input_shape, dtype)
+
+        sch = tvm.tir.Schedule(te.create_prim_func(tensors))
+
+        cache_read_block = sch.cache_read("Output", 0, working_scope)
+        cache_write_block = sch.cache_write("Output", 0, working_scope)
+
+        def apply_transform(block, buffer_name, layout):
+            if layout == "nhwc":
+                pass
+            elif layout == "nchw-8h8w32c-1d":
+                sch.transform_layout(block, buffer_name, layout_transform_1d)
+            elif layout == "nchw-8h8w32c-2d":
+                sch.transform_layout(block, buffer_name, layout_transform_2d)
+            else:
+                raise RuntimeError(f"Unexpected layout '{layout}'")
+
+        apply_transform(cache_read_block, ("read", 0), input_layout)
+        apply_transform(cache_read_block, ("write", 0), working_layout)
+        apply_transform(cache_write_block, ("read", 0), working_layout)
+        apply_transform(cache_write_block, ("write", 0), output_layout)
+
+        return [sch.mod]
+
     @tvm.testing.fixture
     def ir_module(self, schedule_args):
         # If the two buffers are accessed with the same indices, CSE
@@ -272,7 +329,7 @@ class TestElementWise:
                 "Input.global.vtcm": working_layout,
                 "Output.global.vtcm": working_layout,
                 "Output": output_layout,
-            }[buffer.name]
+            }[buffer.name.replace("_", ".")]
 
             expected_physical_dimensions = {
                 "nhwc": 1,