You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by ar...@apache.org on 2022/07/19 16:25:18 UTC
[tvm] branch main updated: [microTVM] Make Arduino API server obey timeout (#12074)
This is an automated email from the ASF dual-hosted git repository.
areusch pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new ae015d9ea2 [microTVM] Make Arduino API server obey timeout (#12074)
ae015d9ea2 is described below
commit ae015d9ea24916b016b9412611d692298420e59b
Author: Gavin Uberti <gu...@users.noreply.github.com>
AuthorDate: Tue Jul 19 12:25:12 2022 -0400
[microTVM] Make Arduino API server obey timeout (#12074)
* Make Arduino API server obey timeout
* Pass arm_cpu as default option to micro testing
Syntax fix
Increase Zephyr default stack size for create_aot_session
* Set write_timeout when appropriate
* Fix unit tests and linting
Check whether arm-cpu flag is breaking tests
Update tests for arm-cpu flag
---
.../arduino/template_project/microtvm_api_server.py | 6 +++---
python/tvm/micro/testing/evaluation.py | 8 ++++++--
python/tvm/micro/testing/utils.py | 5 ++++-
tests/micro/common/test_autotune.py | 13 +++----------
4 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/apps/microtvm/arduino/template_project/microtvm_api_server.py b/apps/microtvm/arduino/template_project/microtvm_api_server.py
index 0e922f06cb..1d3b69e2e6 100644
--- a/apps/microtvm/arduino/template_project/microtvm_api_server.py
+++ b/apps/microtvm/arduino/template_project/microtvm_api_server.py
@@ -507,7 +507,7 @@ class Handler(server.ProjectAPIHandler):
break
time.sleep(0.5)
- self._serial = serial.Serial(port, baudrate=115200, timeout=5)
+ self._serial = serial.Serial(port, baudrate=115200, timeout=10)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
@@ -522,13 +522,13 @@ class Handler(server.ProjectAPIHandler):
self._serial = None
def read_transport(self, n, timeout_sec):
- # It's hard to set timeout_sec, so we just throw it away
- # TODO fix this
+ self._serial.timeout = timeout_sec
if self._serial is None:
raise server.TransportClosedError()
return self._serial.read(n)
def write_transport(self, data, timeout_sec):
+ self._serial.write_timeout = timeout_sec
if self._serial is None:
raise server.TransportClosedError()
return self._serial.write(data)
diff --git a/python/tvm/micro/testing/evaluation.py b/python/tvm/micro/testing/evaluation.py
index c60f0fc482..7f946faed5 100644
--- a/python/tvm/micro/testing/evaluation.py
+++ b/python/tvm/micro/testing/evaluation.py
@@ -83,7 +83,9 @@ def create_aot_session(
params,
build_dir=Path(tempfile.mkdtemp()),
tune_logs=None,
+ timeout_override=None,
use_cmsis_nn=False,
+ project_options=None,
):
"""AOT-compiles and uploads a model to a microcontroller, and returns the RPC session"""
@@ -108,7 +110,6 @@ def create_aot_session(
parameter_size = len(tvm.runtime.save_param_dict(lowered.get_params()))
print(f"Model parameter size: {parameter_size}")
- # Once the project has been uploaded, we don't need to keep it
project = tvm.micro.generate_project(
str(tvm.micro.get_microtvm_template_projects(platform)),
lowered,
@@ -116,12 +117,15 @@ def create_aot_session(
{
f"{platform}_board": board,
"project_type": "host_driven",
+ # {} shouldn't be the default value for project options ({}
+ # is mutable), so we use this workaround
+ **(project_options or {}),
},
)
project.build()
project.flash()
- return tvm.micro.Session(project.transport())
+ return tvm.micro.Session(project.transport(), timeout_override=timeout_override)
# This utility function was designed ONLY for one input / one output models
diff --git a/python/tvm/micro/testing/utils.py b/python/tvm/micro/testing/utils.py
index 820b649c74..323108b253 100644
--- a/python/tvm/micro/testing/utils.py
+++ b/python/tvm/micro/testing/utils.py
@@ -41,8 +41,11 @@ def get_supported_boards(platform: str):
def get_target(platform: str, board: str):
+ """Intentionally simple function for making target strings for microcontrollers.
+ If you need more complex arguments, one should call target.micro directly. Note
+ that almost all, but not all, supported microcontrollers are Arm-based."""
model = get_supported_boards(platform)[board]["model"]
- return str(tvm.target.target.micro(model))
+ return str(tvm.target.target.micro(model, options=["-device=arm_cpu"]))
def check_tune_log(log_path: Union[Path, str]):
diff --git a/tests/micro/common/test_autotune.py b/tests/micro/common/test_autotune.py
index 37836563a0..1575036bf7 100644
--- a/tests/micro/common/test_autotune.py
+++ b/tests/micro/common/test_autotune.py
@@ -17,10 +17,6 @@
from io import StringIO
import json
-from pathlib import Path
-import sys
-import tempfile
-from typing import Union
import numpy as np
import pytest
@@ -50,18 +46,15 @@ def test_kws_autotune_workflow(platform, board, tmp_path):
str_logs = str_io_logs.getvalue().rstrip().split("\n")
logs = list(map(json.loads, str_logs))
- assert len(logs) == 2 * TUNING_RUNS_PER_OPERATOR # Two operators
+ assert len(logs) == 1 * TUNING_RUNS_PER_OPERATOR # One operator
# Check we tested both operators
op_names = list(map(lambda x: x["input"][1], logs))
- assert op_names[0] == op_names[1] == "dense_nopack.x86"
- assert op_names[2] == op_names[3] == "dense_pack.x86"
+ assert op_names[0] == op_names[1] == "conv2d_nhwc_spatial_pack.arm_cpu"
# Make sure we tested different code. != does deep comparison in Python 3
assert logs[0]["config"]["index"] != logs[1]["config"]["index"]
assert logs[0]["config"]["entity"] != logs[1]["config"]["entity"]
- assert logs[2]["config"]["index"] != logs[3]["config"]["index"]
- assert logs[2]["config"]["entity"] != logs[3]["config"]["entity"]
# Compile the best model with AOT and connect to it
with tvm.micro.testing.create_aot_session(
@@ -82,7 +75,7 @@ def test_kws_autotune_workflow(platform, board, tmp_path):
labels = [0, 0, 0]
# Validate performance across random runs
- time, acc = tvm.micro.testing.evaluate_model_accuracy(
+ time, _ = tvm.micro.testing.evaluate_model_accuracy(
session, aot_executor, samples, labels, runs_per_sample=20
)
# `time` is the average time taken to execute model inference on the