Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/09/08 00:02:54 UTC

[GitHub] [incubator-tvm] xqdan commented on issue #6413: TIR Printer bugs

xqdan commented on issue #6413:
URL: https://github.com/apache/incubator-tvm/issues/6413#issuecomment-688547478


   @spectrometerHBH you can reproduce this with the patch below:
   ```
   diff --git a/tests/python/unittest/test_te_schedule_tensor_core.py b/tests/python/unittest/test_te_schedule_tensor_core.py
   index ae2301caf..b832a0bce 100644
   --- a/tests/python/unittest/test_te_schedule_tensor_core.py
   +++ b/tests/python/unittest/test_te_schedule_tensor_core.py
   @@ -103,14 +103,16 @@ def intrin_wmma_store_matrix(shape):
        return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC})
   
   
   -def test_tensor_core_batch_matmal():
   -    if not tvm.gpu(0).exist or not tvm.runtime.enabled("cuda"):
   -        print("skip because cuda is not enabled..")
   -        return
   -    if not nvcc.have_tensorcore(tvm.gpu(0).compute_version):
   -        print("skip because gpu does not support tensor core")
   -        return
   +__TRACE_COUNTER__ = 0
   +def dumpir(module, info, is_before):
   +    global __TRACE_COUNTER__
   +    if not is_before:
   +        __TRACE_COUNTER__ += 1
   +        pname = str(__TRACE_COUNTER__).rjust(2, '0') + "_" + info.name + ".ir"
   +        with open(pname, "a") as f:
   +            f.write(str(module))
   
   +def test_tensor_core_batch_matmal():
        batch_size = 4
        n = 512
        m, l = n, n
   @@ -195,7 +197,8 @@ def test_tensor_core_batch_matmal():
        s[C].tensorize(kernel_i, intrin_wmma_store_matrix((32, 8, 16)))
        s[CF].tensorize(_i, intrin_wmma_gemm((32, 8, 16)))
   
   -    func = tvm.build(s, [A, B, C], 'cuda')
   +    with tvm.transform.PassContext(opt_level=3, trace=dumpir):
   +        func = tvm.build(s, [A, B, C], 'cuda')
   
        ctx = tvm.gpu(0)
        a_np = np.random.uniform(size=(batch_size, nn, ll, 32, 16)).astype(A.dtype)
   @@ -384,4 +387,4 @@ def test_tensor_core_batch_conv():
   
    if __name__ == '__main__':
        test_tensor_core_batch_matmal()
   -    test_tensor_core_batch_conv()
   +    #test_tensor_core_batch_conv()
   ```
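
   For reference, the same trace hook can be exercised without the tensor core schedule. The sketch below is not from the original comment: it assumes the same 2020-era TVM API used in the diff (a `trace` callback on `tvm.transform.PassContext`, invoked as `(module, info, is_before)`), and it swaps in a trivial element-wise compute built for the `llvm` target, so each lowering pass still produces a numbered `.ir` dump.
   ```
   import tvm
   from tvm import te

   _trace_counter = 0

   def dump_ir(module, info, is_before):
       # Mirror the dumpir hook from the patch above: write one numbered
       # .ir snapshot per pass, after the pass has run.
       global _trace_counter
       if not is_before:
           _trace_counter += 1
           fname = str(_trace_counter).rjust(2, '0') + "_" + info.name + ".ir"
           with open(fname, "a") as f:
               f.write(str(module))

   # Assumption: a trivial element-wise schedule and the llvm target stand in
   # for the tensor core workload in the original reproduction.
   n = 1024
   A = te.placeholder((n,), name="A")
   B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
   s = te.create_schedule(B.op)

   # Every lowering pass is traced, so the printer runs on each intermediate module.
   with tvm.transform.PassContext(opt_level=3, trace=dump_ir):
       func = tvm.build(s, [A, B], "llvm")
   ```
   Running it should leave files named like `01_<PassName>.ir` in the working directory; diffing consecutive dumps helps narrow down which pass the printer trips on.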

