Posted to commits@tvm.apache.org by ju...@apache.org on 2022/11/10 15:29:55 UTC

[tvm] branch feature/2022-11-09/printer-explicit-ir-node updated: Update test_async_dma_pipeline.py

This is an automated email from the ASF dual-hosted git repository.

junrushao pushed a commit to branch feature/2022-11-09/printer-explicit-ir-node
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/feature/2022-11-09/printer-explicit-ir-node by this push:
     new a35a8d00ff Update test_async_dma_pipeline.py
a35a8d00ff is described below

commit a35a8d00ff7e3677bd57bbe586affbec0fa410fa
Author: Junru Shao <ju...@gmail.com>
AuthorDate: Thu Nov 10 07:29:47 2022 -0800

    Update test_async_dma_pipeline.py
---
 tests/python/contrib/test_hexagon/test_async_dma_pipeline.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/python/contrib/test_hexagon/test_async_dma_pipeline.py b/tests/python/contrib/test_hexagon/test_async_dma_pipeline.py
index 19b380c1bd..9f8e639b53 100644
--- a/tests/python/contrib/test_hexagon/test_async_dma_pipeline.py
+++ b/tests/python/contrib/test_hexagon/test_async_dma_pipeline.py
@@ -128,9 +128,9 @@ def get_single_dma_schedule(size_a, size_w):
         a_buffer = T.match_buffer(a_input, a_shape, dtype="uint8", scope="global")
         w_buffer = T.match_buffer(b_input, w_shape, dtype="uint8", scope="global")
         c_buffer = T.match_buffer(c_output, out_shape, dtype="int32", scope="global")
-        a_global_vtcm = T.alloc_buffer(a_shape, dtype="uint8", scope="global.vtcm")
-        w_global_vtcm = T.alloc_buffer(w_shape, dtype="uint8", scope="global.vtcm")
-        c_global_vtcm = T.alloc_buffer(out_shape, dtype="int32", scope="global.vtcm")
+        a_global_vtcm = T.alloc_buffer(a_shape, dtype="uint8", mem_scope="global.vtcm")
+        w_global_vtcm = T.alloc_buffer(w_shape, dtype="uint8", mem_scope="global.vtcm")
+        c_global_vtcm = T.alloc_buffer(out_shape, dtype="int32", mem_scope="global.vtcm")
         T.evaluate(
             T.tvm_call_packed(
                 "device_api.hexagon.mem_copy_DLTensor",