You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by tq...@apache.org on 2020/11/08 14:12:58 UTC
[incubator-tvm] branch main updated: [DOC] Improve the order of
tutorials within a subsection (#6880)
This is an automated email from the ASF dual-hosted git repository.
tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 75eff37 [DOC] Improve the order of tutorials within a subsection (#6880)
75eff37 is described below
commit 75eff37732489b5558b5ca946a233087aa5dfde3
Author: Lianmin Zheng <li...@gmail.com>
AuthorDate: Sun Nov 8 06:12:45 2020 -0800
[DOC] Improve the order of tutorials within a subsection (#6880)
---
docs/README.txt | 5 ++
docs/conf.py | 69 ++++++++++++++++++++++
tutorials/auto_scheduler/tune_conv2d_layer_cuda.py | 4 +-
tutorials/auto_scheduler/tune_matmul_x86.py | 2 +-
tutorials/autotvm/tune_conv2d_cuda.py | 3 +-
tutorials/autotvm/tune_relay_arm.py | 6 +-
tutorials/autotvm/tune_relay_cuda.py | 12 +---
tutorials/autotvm/tune_relay_mobile_gpu.py | 6 +-
tutorials/autotvm/tune_relay_x86.py | 6 +-
tutorials/autotvm/tune_simple_template.py | 2 +-
10 files changed, 86 insertions(+), 29 deletions(-)
diff --git a/docs/README.txt b/docs/README.txt
index eeec6d9..e409107 100644
--- a/docs/README.txt
+++ b/docs/README.txt
@@ -51,3 +51,8 @@ You will need a gpu CI environment.
```bash
./tests/scripts/task_python_docs.sh
```
+
+Define the Order of Tutorials
+-----------------------------
+You can define the order of tutorials with `conf.py::subsection_order` and `conf.py::within_subsection_order`.
+By default, the tutorials within one subsection are sorted by filename.
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index 5bf2d6b..e3ddae2 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -204,6 +204,74 @@ subsection_order = ExplicitOrder(
]
)
+# Explicitly define the order within a subsection.
+# The listed files are sorted according to the list.
+# The unlisted files are sorted by filename.
+# The unlisted files always appear after listed files.
+within_subsection_order = {
+ "get_started": [
+ "relay_quick_start.py",
+ "tensor_expr_get_started.py",
+ "tvmc_command_line_driver.py",
+ "cross_compilation_and_rpc.py",
+ ],
+ "frontend": [
+ "from_pytorch.py",
+ "from_tensorflow.py",
+ "from_mxnet.py",
+ "from_onnx.py",
+ "from_keras.py",
+ "from_tflite.py",
+ "from_coreml.py",
+ "from_darknet.py",
+ "from_caffe2.py",
+ ],
+ "language": [
+ "schedule_primitives.py",
+ "reduciton.py",
+ "intrin_math.py",
+ "scan.py",
+ "extern_op.py",
+ "tensorize.py",
+ "tuple_inputs.py",
+ "tedd.py",
+ ],
+ "optimize": [
+ "opt_gemm.py",
+ "opt_conv_cuda.py",
+ "opt_conv_tensorcore.py",
+ "opt_matmul_auto_tensorcore.py",
+ ],
+ "autotvm": [
+ "tune_simple_template.py",
+ "tune_conv2d_cuda.py",
+ "tune_relay_cuda.py",
+ "tune_relay_x86.py",
+ "tune_relay_arm.py",
+ "tune_relay_mobile_gpu.py",
+ ],
+ "auto_scheduler": ["tune_matmul_x86.py", "tune_conv2d_layer_cuda.py"],
+}
+
+
+class WithinSubsectionOrder:
+ def __init__(self, src_dir):
+ self.src_dir = src_dir.split("/")[-1]
+
+ def __call__(self, filename):
+ # If the order is provided, use the provided order
+ if (
+ self.src_dir in within_subsection_order
+ and filename in within_subsection_order[self.src_dir]
+ ):
+ index = within_subsection_order[self.src_dir].index(filename)
+ assert index < 1e10
+ return "\0%010d" % index
+
+ # Otherwise, sort by filename
+ return filename
+
+
sphinx_gallery_conf = {
"backreferences_dir": "gen_modules/backreferences",
"doc_module": ("tvm", "numpy"),
@@ -213,6 +281,7 @@ sphinx_gallery_conf = {
"numpy": "https://numpy.org/doc/stable",
},
"examples_dirs": examples_dirs,
+ "within_subsection_order": WithinSubsectionOrder,
"gallery_dirs": gallery_dirs,
"subsection_order": subsection_order,
"filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
diff --git a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
index 42273bf..d1b3c22 100644
--- a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
+++ b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
@@ -22,8 +22,7 @@ Auto-scheduling a convolution layer for GPU
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
-
-Different from the existing :ref:`autotvm <tutorials-autotvm-sec>` which relies on
+Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any templates.
Users only need to write the computation declaration without any schedule commands or templates.
The auto-scheduler can automatically generate a large search space and
@@ -182,7 +181,6 @@ func = tvm.build(sch, args, target)
# and resume the status of search policy and cost model with the log file.
# In the example below we resume the status and do 5 more trials.
-
cost_model = auto_scheduler.XGBModel()
cost_model.update_from_file(log_file)
search_policy = auto_scheduler.SketchPolicy(
diff --git a/tutorials/auto_scheduler/tune_matmul_x86.py b/tutorials/auto_scheduler/tune_matmul_x86.py
index 0f2ebe0..2bd47de 100644
--- a/tutorials/auto_scheduler/tune_matmul_x86.py
+++ b/tutorials/auto_scheduler/tune_matmul_x86.py
@@ -20,7 +20,7 @@ Auto-scheduling matrix multiplication for CPU
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \
`Chengfan Jia <https://github.com/jcf94/>`_
-Different from the existing :ref:`autotvm <tutorials-autotvm-sec>` which relies on
+Different from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on
manual templates to define the search space, the auto-scheduler does not require any templates.
Users only need to write the computation declaration without any schedule commands or templates.
The auto-scheduler can automatically generate a large search space and
diff --git a/tutorials/autotvm/tune_conv2d_cuda.py b/tutorials/autotvm/tune_conv2d_cuda.py
index b307077..b662baf 100644
--- a/tutorials/autotvm/tune_conv2d_cuda.py
+++ b/tutorials/autotvm/tune_conv2d_cuda.py
@@ -53,8 +53,7 @@ import sys
import numpy as np
import tvm
-from tvm import te
-from tvm import topi
+from tvm import te, topi, testing
from tvm.topi.testing import conv2d_nchw_python
from tvm import autotvm
diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py
index 7514ee7..c69c7d9 100644
--- a/tutorials/autotvm/tune_relay_arm.py
+++ b/tutorials/autotvm/tune_relay_arm.py
@@ -66,9 +66,7 @@ import os
import numpy as np
import tvm
-from tvm import te
-from tvm import autotvm
-from tvm import relay
+from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
@@ -104,7 +102,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
- input_shape = (1, 3, 299, 299)
+ input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
diff --git a/tutorials/autotvm/tune_relay_cuda.py b/tutorials/autotvm/tune_relay_cuda.py
index f9b8921..3dccefe 100644
--- a/tutorials/autotvm/tune_relay_cuda.py
+++ b/tutorials/autotvm/tune_relay_cuda.py
@@ -64,12 +64,9 @@ import os
import numpy as np
import tvm
-from tvm import te
-from tvm import autotvm
-from tvm import relay
+from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
-from tvm.contrib.utils import tempdir
import tvm.contrib.graph_runtime as runtime
#################################################################
@@ -102,7 +99,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
- input_shape = (1, 3, 299, 299)
+ input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
@@ -239,11 +236,6 @@ def tune_and_evaluate(tuning_opt):
with tvm.transform.PassContext(opt_level=3):
lib = relay.build_module.build(mod, target=target, params=params)
- # export library
- tmp = tempdir()
- filename = "net.tar"
- lib.export_library(tmp.relpath(filename))
-
# load parameters
ctx = tvm.context(str(target), 0)
module = runtime.GraphModule(lib["default"](ctx))
diff --git a/tutorials/autotvm/tune_relay_mobile_gpu.py b/tutorials/autotvm/tune_relay_mobile_gpu.py
index b7fbf89..3611696 100644
--- a/tutorials/autotvm/tune_relay_mobile_gpu.py
+++ b/tutorials/autotvm/tune_relay_mobile_gpu.py
@@ -65,9 +65,7 @@ import os
import numpy as np
import tvm
-from tvm import te
-from tvm import autotvm
-from tvm import relay
+from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib.utils import tempdir
@@ -103,7 +101,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
- input_shape = (1, 3, 299, 299)
+ input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
diff --git a/tutorials/autotvm/tune_relay_x86.py b/tutorials/autotvm/tune_relay_x86.py
index b1b7ca2..5b3d032 100644
--- a/tutorials/autotvm/tune_relay_x86.py
+++ b/tutorials/autotvm/tune_relay_x86.py
@@ -32,9 +32,7 @@ import os
import numpy as np
import tvm
-from tvm import te
-from tvm import autotvm
-from tvm import relay
+from tvm import relay, autotvm
from tvm.relay import testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
@@ -73,7 +71,7 @@ def get_network(name, batch_size):
batch_size=batch_size, version="1.1", dtype=dtype
)
elif name == "inception_v3":
- input_shape = (1, 3, 299, 299)
+ input_shape = (batch_size, 3, 299, 299)
mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)
elif name == "mxnet":
# an example for mxnet model
diff --git a/tutorials/autotvm/tune_simple_template.py b/tutorials/autotvm/tune_simple_template.py
index b5167b3..4c5c7da 100644
--- a/tutorials/autotvm/tune_simple_template.py
+++ b/tutorials/autotvm/tune_simple_template.py
@@ -59,7 +59,7 @@ import sys
import numpy as np
import tvm
-from tvm import te
+from tvm import te, testing
# the module is called `autotvm`
from tvm import autotvm