You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/09/20 15:02:43 UTC

[GitHub] [tvm] gromero opened a new issue #9049: [Bug] test_autotune_conv2d fails on Disco board

gromero opened a new issue #9049:
URL: https://github.com/apache/tvm/issues/9049


   The test_autotune_conv2d() test is currently failing on Disco boards, as shown below:
   
   ```================================================================================= FAILURES ==================================================================================
   __________________________________________________________________ test_autotune_conv2d[stm32f746g_disco] ___________________________________________________________________
   
   temp_dir = <tvm.contrib.utils.TempDirectory object at 0x7fd4b06f50d0>, board = 'stm32f746g_disco', west_cmd = 'west', tvm_debug = False
   
       @tvm.testing.requires_micro
       def test_autotune_conv2d(temp_dir, board, west_cmd, tvm_debug):
           """Test AutoTune for microTVM Zephyr"""
           import tvm.relay as relay
       
           model = conftest.ZEPHYR_BOARDS[board]
       
           # Create a Relay model
           data_shape = (1, 3, 16, 16)
           weight_shape = (8, 3, 5, 5)
           data = relay.var("data", relay.TensorType(data_shape, "float32"))
           weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
           y = relay.nn.conv2d(
               data,
               weight,
               padding=(2, 2),
               kernel_size=(5, 5),
               kernel_layout="OIHW",
               out_dtype="float32",
           )
           f = relay.Function([data, weight], y)
           mod = tvm.IRModule.from_expr(f)
           mod = relay.transform.InferType()(mod)
       
           data_sample = np.random.rand(data_shape[0], data_shape[1], data_shape[2], data_shape[3]).astype(
               "float32"
           )
           weight_sample = np.random.rand(
               weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
           ).astype("float32")
           params = {mod["main"].params[1].name_hint: weight_sample}
       
           target = tvm.target.target.micro(model)
           pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
           with pass_context:
               tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
           assert len(tasks) > 0
       
           repo_root = pathlib.Path(
               subprocess.check_output(["git", "rev-parse", "--show-toplevel"], encoding="utf-8").strip()
           )
           template_project_dir = repo_root / "apps" / "microtvm" / "zephyr" / "template_project"
           module_loader = tvm.micro.AutoTvmModuleLoader(
               template_project_dir=template_project_dir,
               project_options={
                   "zephyr_board": board,
                   "west_cmd": west_cmd,
                   "verbose": 1,
                   "project_type": "host_driven",
               },
           )
       
           timeout = 200
           builder = tvm.autotvm.LocalBuilder(
               timeout=timeout,
               n_parallel=1,
               build_kwargs={"build_option": {"tir.disable_vectorize": True}},
               do_fork=True,
               build_func=tvm.micro.autotvm_build_func,
           )
           runner = tvm.autotvm.LocalRunner(
               number=1, repeat=1, timeout=timeout, module_loader=module_loader
           )
       
           measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
       
           log_path = pathlib.Path("zephyr_autotune.log")
           if log_path.exists():
               log_path.unlink()
       
           n_trial = 10
           for task in tasks:
               tuner = tvm.autotvm.tuner.GATuner(task)
               tuner.tune(
                   n_trial=n_trial,
                   measure_option=measure_option,
                   callbacks=[
                       tvm.autotvm.callback.log_to_file(str(log_path)),
                       tvm.autotvm.callback.progress_bar(n_trial, si_prefix="M"),
                   ],
                   si_prefix="M",
               )
               assert tuner.best_flops > 0
       
   >       check_tune_log(log_path)
   
   tests/micro/zephyr/test_zephyr.py:469: 
   _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
   
   log_path = PosixPath('zephyr_autotune.log')
   
       def check_tune_log(log_path: Union[pathlib.Path, str]):
           """Read the tuning log and check each result."""
           with open(log_path, "r") as f:
               lines = f.readlines()
       
           for line in lines:
               if len(line) > 0:
                   tune_result = json.loads(line)
   >               assert tune_result["result"][0][0] < 1000000000.0
   E               AssertionError
   
   python/tvm/micro/testing.py:33: AssertionError
   ------------------------------------- generated xml file: /home/gromero/git/tvm/build/pytest-results/python-microtvm-zephyr-ctypes.xml --------------------------------------
   ========================================================================== short test summary info ========================================================================
   FAILED tests/micro/zephyr/test_zephyr.py::test_autotune_conv2d[stm32f746g_disco] - AssertionError
   ============================================================ 1 failed, 9 passed, 2 skipped in 366.09s (0:06:06) =============================================================```
   
   as of HEAD at `v0.4-5171-g44b644c6a`
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org



[GitHub] [tvm] gromero commented on issue #9049: [Bug] test_autotune_conv2d fails on Disco board

Posted by GitBox <gi...@apache.org>.
gromero commented on issue #9049:
URL: https://github.com/apache/tvm/issues/9049#issuecomment-927375975


   This issue also happens on NXP i.MX RT 1050 EVK boards:
   
   ```
   ==================================================================================================== FAILURES =====================================================================================================
   ______________________________________________________________________________________ test_autotune_conv2d[mimxrt1050_evk] _______________________________________________________________________________________
   
   temp_dir = <tvm.contrib.utils.TempDirectory object at 0x7f9fd1e55520>, board = 'mimxrt1050_evk', west_cmd = 'west', tvm_debug = False
   
       @tvm.testing.requires_micro
       def test_autotune_conv2d(temp_dir, board, west_cmd, tvm_debug):
           """Test AutoTune for microTVM Zephyr"""
           model = test_utils.ZEPHYR_BOARDS[board]
           build_config = {"debug": tvm_debug}
       
           # Create a Relay model
           data_shape = (1, 3, 16, 16)
           weight_shape = (8, 3, 5, 5)
           data = relay.var("data", relay.TensorType(data_shape, "float32"))
           weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
           y = relay.nn.conv2d(
               data,
               weight,
               padding=(2, 2),
               kernel_size=(5, 5),
               kernel_layout="OIHW",
               out_dtype="float32",
           )
           f = relay.Function([data, weight], y)
           mod = tvm.IRModule.from_expr(f)
           mod = relay.transform.InferType()(mod)
       
           data_sample = np.random.rand(data_shape[0], data_shape[1], data_shape[2], data_shape[3]).astype(
               "float32"
           )
           weight_sample = np.random.rand(
               weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
           ).astype("float32")
           params = {mod["main"].params[1].name_hint: weight_sample}
       
           target = tvm.target.target.micro(model)
           pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
           with pass_context:
               tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
           assert len(tasks) > 0
       
           config_main_stack_size = None
           if test_utils.qemu_boards(board):
               config_main_stack_size = 1536
       
           project_options = {
               "zephyr_board": board,
               "west_cmd": west_cmd,
               "verbose": 1,
               "project_type": "host_driven",
           }
           if config_main_stack_size is not None:
               project_options["config_main_stack_size"] = config_main_stack_size
       
           module_loader = tvm.micro.AutoTvmModuleLoader(
               template_project_dir=test_utils.TEMPLATE_PROJECT_DIR,
               project_options=project_options,
           )
       
           timeout = 200
           builder = tvm.autotvm.LocalBuilder(
               timeout=timeout,
               n_parallel=1,
               build_kwargs={"build_option": {"tir.disable_vectorize": True}},
               do_fork=True,
               build_func=tvm.micro.autotvm_build_func,
           )
           runner = tvm.autotvm.LocalRunner(
               number=1, repeat=1, timeout=timeout, module_loader=module_loader
           )
       
           measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
       
           log_path = pathlib.Path("zephyr_autotune.log")
           if log_path.exists():
               log_path.unlink()
       
           n_trial = 10
           for task in tasks:
               tuner = tvm.autotvm.tuner.GATuner(task)
               tuner.tune(
                   n_trial=n_trial,
                   measure_option=measure_option,
                   callbacks=[
                       tvm.autotvm.callback.log_to_file(str(log_path)),
                       tvm.autotvm.callback.progress_bar(n_trial, si_prefix="M"),
                   ],
                   si_prefix="M",
               )
               assert tuner.best_flops > 0
       
   >       check_tune_log(log_path)
   
   tests/micro/zephyr/test_zephyr.py:461: 
   _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
   
   log_path = PosixPath('zephyr_autotune.log')
   
       def check_tune_log(log_path: Union[pathlib.Path, str]):
           """Read the tuning log and check each result."""
           with open(log_path, "r") as f:
               lines = f.readlines()
       
           for line in lines:
               if len(line) > 0:
                   tune_result = json.loads(line)
   >               assert tune_result["result"][0][0] < 1000000000.0
   E               AssertionError
   
   python/tvm/micro/testing.py:33: AssertionError
   -------------------------------------------------------- generated xml file: /home/gromero/git/tvm/build/pytest-results/python-microtvm-zephyr-ctypes.xml ---------------------------------------------------------
   ============================================================================================= short test summary info =============================================================================================
   FAILED tests/micro/zephyr/test_zephyr.py::test_autotune_conv2d[mimxrt1050_evk] - AssertionError
   =============================================================================== 1 failed, 9 passed, 2 skipped in 470.15s (0:07:50) ================================================================================```


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org