You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/09/07 21:13:40 UTC

[GitHub] [tvm] areusch commented on a change in pull request #8409: Prepare the topi tests for AArch64 CI.

areusch commented on a change in pull request #8409:
URL: https://github.com/apache/tvm/pull/8409#discussion_r703834064



##########
File path: tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py
##########
@@ -51,26 +52,25 @@ def get_ref_data():
 
     a_np, w_np, b_np = get_ref_data()
 
-    def check_device(device):
-        dev = tvm.device(device, 0)
-        if not tvm.testing.device_enabled(device):
-            print("Skip because %s is not enabled" % device)
+    def check_device(target, dev):
+        if not tvm.testing.device_enabled(target):
+            print("Skip because %s is not enabled" % target)
             return
-        print("Running on target: %s" % device)
+        print("Running on target: %s" % target)
 
-        with tvm.target.Target(device):
+        with tvm.target.Target(target):
             B = topi.nn.conv2d(A, W, stride, padding, dilation, layout="NHWC", out_dtype="int32")
             s = topi.x86.schedule_conv2d_nhwc_pack_int8([B])
         a = tvm.nd.array(a_np, dev)
         w = tvm.nd.array(w_np, dev)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
-        func = tvm.build(s, [A, W, B], device)
+        func = tvm.build(s, [A, W, B], target)
         func(a, w, b)
         tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
 
     # for device in ['llvm -mcpu=skylake-avx512']:
-    for device in ["llvm"]:
-        check_device(device)
+    for target, dev in tvm.testing.enabled_targets():

Review comment:
      Same question — would it be possible to use `tvm.testing.parametrize_targets` here?

##########
File path: tests/python/topi/python/test_topi_sparse.py
##########
@@ -594,7 +580,11 @@ def check_device(device):
         )
         tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np.astype("float32"), atol=1e-4, rtol=1e-4)
 
-    check_device("llvm")
+    # Unable to use the idiom below as the test isn't suitable for non llvm targets.

Review comment:
      Same question here — could `tvm.testing.parametrize_targets` be used instead?

##########
File path: tests/python/topi/python/test_topi_sparse.py
##########
@@ -72,12 +67,15 @@ def check_device(device):
         assert a.data.dtype == A.data.dtype
         assert a.indices.dtype == A.indices.dtype
         assert a.indptr.dtype == A.indptr.dtype
-        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv")
+        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], target, name="csrmv")
         f(_nr, a.data, a.indices, a.indptr, b, c, d)
         tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)
 
-    for device in ["llvm"]:
-        check_device(device)
+    # Unable to use the following idiom and thus restrict to llvm only.

Review comment:
      Can you add a comment explaining why this is restricted to llvm only?

##########
File path: tests/python/topi/python/test_topi_sparse.py
##########
@@ -102,25 +100,23 @@ def get_ref_data():
 
     a_np, b_np, c_np, d_np = get_ref_data()
 
-    def check_device(device):
-        dev = tvm.device(device, 0)
-        if not tvm.testing.device_enabled(device):
-            print("Skip because %s is not enabled" % device)
-            return
-        print("Running on target: %s" % device)
+    def check_device(target, dev):
         a = tvmsp.array(a_np, dev)
         _nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
         assert a.shape[0] == a.indptr.shape[0] - 1
         b = tvm.nd.array(b_np, dev)
         c = tvm.nd.array(c_np, dev)
         d = tvm.nd.array(np.zeros((_nr, out_dim), dtype=dtype), dev)
-        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm")
+        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], target, name="csrmm")
 
         f(_nr, a.data, a.indices, a.indptr, b, c, d)
         tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-2, atol=1e-2)
 
-    for device in ["llvm"]:
-        check_device(device)
+    # This test is not yet ready to use the idiom ,

Review comment:
      Could you state in the comment what is still missing before this test can use the idiom?

##########
File path: tests/python/topi/python/test_topi_conv2d_nhwc.py
##########
@@ -66,21 +60,25 @@ def get_ref_data():
 
     a_np, w_np, b_np = get_ref_data()
 
-    def check_device(target, dev):
-        print("Running on target: %s" % target)
-        with tvm.target.Target(target):
-            fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_nhwc_implement)
+    def check_device(device):
+        if not tvm.testing.device_enabled(device):
+            print("Skip because %s is not enabled" % device)
+            return
+        print("Running on target: %s" % device)
+        with tvm.target.Target(device):
+            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_implement)
             B = fcompute(A, W, stride, padding, dilation, dtype)
             s = fschedule([B])
+        dev = tvm.device(device, 0)
         a = tvm.nd.array(a_np, dev)
         w = tvm.nd.array(w_np, dev)
         b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
-        func = tvm.build(s, [A, W, B], target)
+        func = tvm.build(s, [A, W, B], device)
         func(a, w, b)
         tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
 
-    for target, dev in tvm.testing.enabled_targets():
-        check_device(target, dev)
+    for device in ["llvm", "cuda"]:

Review comment:
      Should these become `@tvm.testing.requires_llvm` / `@tvm.testing.requires_cuda` decorators instead of a hard-coded device list?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org