You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by ar...@apache.org on 2023/01/07 01:32:28 UTC

[tvm] branch main updated: [docs] Add "Open with Colab" button to documentation (#13627)

This is an automated email from the ASF dual-hosted git repository.

areusch pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 30abbe9832 [docs] Add "Open with Colab" button to documentation (#13627)
30abbe9832 is described below

commit 30abbe98321acf594d2cd0d6b9a7c570471d9264
Author: Gavin Uberti <gu...@users.noreply.github.com>
AuthorDate: Fri Jan 6 17:32:23 2023 -0800

    [docs] Add "Open with Colab" button to documentation (#13627)
    
    * Add Colab header to TVM tutorials by default
    * Fix library imports to work with Colab
    * Better support for Jupyter magic and directives
    
    Co-authored-by: Mehrdad Hessar <mh...@octoml.ai>
---
 docs/README.md                                     |  39 ++++
 docs/conf.py                                       | 210 ++++++++++++++++++++-
 gallery/how_to/compile_models/from_coreml.py       |   7 +-
 gallery/how_to/compile_models/from_darknet.py      |   5 +-
 gallery/how_to/compile_models/from_keras.py        |   7 +-
 gallery/how_to/compile_models/from_mxnet.py        |  10 +-
 gallery/how_to/compile_models/from_oneflow.py      |   4 +-
 gallery/how_to/compile_models/from_onnx.py         |   9 +-
 gallery/how_to/compile_models/from_paddle.py       |   6 +-
 gallery/how_to/compile_models/from_pytorch.py      |  12 +-
 gallery/how_to/compile_models/from_tensorflow.py   |   5 +
 gallery/how_to/compile_models/from_tflite.py       |   5 +-
 .../how_to/deploy_models/deploy_model_on_adreno.py |   1 +
 .../how_to/deploy_models/deploy_model_on_nano.py   |   1 +
 .../deploy_object_detection_pytorch.py             |   4 +-
 .../how_to/extend_tvm/bring_your_own_datatypes.py  |   2 +-
 gallery/how_to/optimize_operators/opt_conv_cuda.py |   1 +
 .../optimize_operators/opt_conv_tensorcore.py      |   1 +
 .../tune_conv2d_layer_cuda.py                      |   1 +
 .../how_to/tune_with_autotvm/tune_conv2d_cuda.py   |   1 +
 .../how_to/tune_with_autotvm/tune_relay_cuda.py    |   1 +
 .../how_to/work_with_microtvm/install_cmsis.rst    |  35 ++++
 .../work_with_microtvm/install_dependencies.rst    |  33 ++++
 .../how_to/work_with_microtvm/install_zephyr.rst   |  52 +++++
 gallery/how_to/work_with_microtvm/micro_aot.py     |  31 ++-
 .../how_to/work_with_microtvm/micro_autotune.py    |  26 ++-
 gallery/how_to/work_with_microtvm/micro_pytorch.py |   6 +-
 gallery/how_to/work_with_microtvm/micro_tflite.py  | 128 +++----------
 gallery/how_to/work_with_microtvm/micro_train.py   |  15 +-
 gallery/how_to/work_with_pytorch/using_as_torch.py |   8 +
 .../work_with_pytorch/using_optimized_torch.py     |  10 +-
 gallery/how_to/work_with_relay/build_gcn.py        |   8 +-
 gallery/how_to/work_with_relay/using_relay_viz.py  |   7 +
 gallery/how_to/work_with_schedules/reduction.py    |   1 +
 gallery/how_to/work_with_schedules/scan.py         |   1 +
 gallery/tutorial/intro_topi.py                     |   1 +
 gallery/tutorial/relay_quick_start.py              |   1 +
 gallery/tutorial/tensor_ir_blitz_course.py         |   1 +
 tests/lint/check_request_hook.py                   |  35 ++--
 39 files changed, 564 insertions(+), 167 deletions(-)

diff --git a/docs/README.md b/docs/README.md
index 6c32d2d6bf..572b72fc3c 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -111,3 +111,42 @@ python tests/scripts/ci.py docs --full
 You can define the order of tutorials with `subsection_order` and
 `within_subsection_order` in [`conf.py`](conf.py).
 By default, the tutorials within one subsection are sorted by filename.
+
+## Google Colab Integration
+
+All the TVM tutorials can be opened and used interactively in Google Colab by
+clicking the button at the top of the page. To do this, `sphinx-gallery` builds
+`.ipynb` files from each tutorial, which are automatically deployed to the
+[apache/tvm-site](https://github.com/apache/tvm-site/tree/asf-site) repo's
+`asf-site` branch by [@tvm-bot](https://github.com/tvm-bot).
+
+To make sure your tutorial runs correctly on Colab, any non-Python parts of
+the tutorial (e.g. dependency installations) should be prefixed by an
+[IPython magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html).
+These will not be included in the built `HTML` file. For example, to install
+PyTorch in your tutorial, add a reStructuredText block like the following:
+
+```python
+######################################################################
+# To run this tutorial, we must install PyTorch:
+#
+# .. code-block:: bash
+#
+#     %%shell
+#     pip install torch
+#
+```
+
+### Interactive Bash Scripts
+
+In stock IPython, the `%%bash` magic command should be used to run shell
+commands. However, this command does not give real-time output - the
+tutorial's user will not see any output until the entire cell finishes
+running. When running commands that take several minutes (e.g. installing
+dependencies), this is annoying.
+
+Luckily, Google Colab has the `%%shell` magic command that does the same
+thing as `%%bash`, but gives output in real time. This command is specific
+to Colab, and its [source code](https://github.com/googlecolab/colabtools)
+is public. Thus, `%%shell` should be used instead of `%%bash` when writing
+TVM tutorials.
diff --git a/docs/conf.py b/docs/conf.py
index b4982f14c0..357df8cef1 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -29,15 +29,17 @@
 #
 # All configuration values have a default; values that are commented out
 # serve to show the default.
+from functools import partial
 import gc
+from importlib import import_module
 import inspect
+from hashlib import md5
 import os
 from pathlib import Path
 import re
 import sys
-
-import sphinx_gallery
-
+from textwrap import dedent, indent
+from unittest.mock import patch
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -84,6 +86,198 @@ from tvm import testing
 version = git_describe_version(tvm.__version__)
 release = version
 
+
+def monkey_patch(module_name, func_name):
+    """Helper function for monkey-patching library functions.
+
+    Used to modify a few sphinx-gallery behaviors to make the "Open in Colab" button work correctly.
+    Should be called as a decorator with arguments. Note this behaves differently from unittest's
+    @mock.patch, as our monkey_patch decorator should be placed on the new version of the function.
+    """
+    module = import_module(module_name)
+    original_func = getattr(module, func_name)
+
+    def decorator(function):
+        updated_func = partial(function, real_func=original_func)
+        setattr(module, func_name, updated_func)
+        return updated_func
+
+    return decorator
+
+
+CURRENT_FILE_CONF = None
+
+
+@monkey_patch("sphinx_gallery.py_source_parser", "split_code_and_text_blocks")
+def split_code_and_text_blocks(source_file, return_node, real_func):
+    """Monkey-patch split_code_and_text_blocks to expose sphinx-gallery's file-level config.
+
+    It's kinda gross, but we need access to file_conf to detect the requires_cuda flag.
+    """
+    global CURRENT_FILE_CONF
+    file_conf, blocks, node = real_func(source_file, return_node)
+    CURRENT_FILE_CONF = file_conf
+    return (file_conf, blocks, node)
+
+
+# This header replaces the default sphinx-gallery one in sphinx_gallery/gen_rst.py.
+COLAB_HTML_HEADER = """
+.. DO NOT EDIT. THIS FILE WAS AUTOMATICALLY GENERATED BY
+.. TVM'S MONKEY-PATCHED VERSION OF SPHINX-GALLERY. TO MAKE
+.. CHANGES, EDIT THE SOURCE PYTHON FILE:
+.. "{python_file}"
+
+.. only:: html
+
+    .. note::
+        :class: sphx-glr-download-link-note
+
+        This tutorial can be used interactively with Google Colab! You can also click
+        :ref:`here <sphx_glr_download_{ref_name}>` to run the Jupyter notebook locally.
+
+        .. image:: {button_svg}
+            :align: center
+            :target: {colab_url}
+            :width: 300px
+
+.. rst-class:: sphx-glr-example-title
+
+.. _sphx_glr_{ref_name}:
+
+"""
+
+# Google Colab allows opening .ipynb files on GitHub by appending a GitHub path to this base URL.
+COLAB_URL_BASE = "https://colab.research.google.com/github"
+
+# The GitHub path where the site is automatically deployed by tvm-bot.
+IPYTHON_GITHUB_BASE = "apache/tvm-site/blob/asf-site/docs/_downloads/"
+
+# The SVG image of the "Open in Colab" button.
+BUTTON = "https://raw.githubusercontent.com/apache/web-data/main/images/utilities/colab_button.svg"
+
+
+@monkey_patch("sphinx_gallery.gen_rst", "save_rst_example")
+def save_rst_example(example_rst, example_file, time_elapsed, memory_used, gallery_conf, real_func):
+    """Monkey-patch save_rst_example to include the "Open in Colab" button."""
+
+    # The url is the md5 hash of the notebook path.
+    example_fname = os.path.relpath(example_file, gallery_conf["src_dir"])
+    ref_fname = example_fname.replace(os.path.sep, "_")
+    notebook_path = example_fname[:-2] + "ipynb"
+    digest = md5(notebook_path.encode()).hexdigest()
+
+    # Fixed documentation versions must link to different (earlier) .ipynb notebooks.
+    colab_url = f"{COLAB_URL_BASE}/{IPYTHON_GITHUB_BASE}"
+    if "dev" not in version:
+        colab_url += version + "/"
+    colab_url += digest + "/" + os.path.basename(notebook_path)
+
+    new_header = COLAB_HTML_HEADER.format(
+        python_file=example_fname, ref_name=ref_fname, colab_url=colab_url, button_svg=BUTTON
+    )
+    with patch("sphinx_gallery.gen_rst.EXAMPLE_HEADER", new_header):
+        real_func(example_rst, example_file, time_elapsed, memory_used, gallery_conf)
+
+
+INCLUDE_DIRECTIVE_RE = re.compile(r"^([ \t]*)\.\. include::\s*(.+)\n", flags=re.M)
+COMMENT_DIRECTIVE_RE = re.compile(r"^\.\.(?: .*)?\n(?:(?:  .*)?\n)*", flags=re.M)
+ADMONITION_DIRECTIVE_RE = re.compile(rf"^\.\. admonition:: *(.*)\n((?:(?:  .*)?\n)*)\n", flags=re.M)
+
+
+@monkey_patch("sphinx_gallery.notebook", "rst2md")
+def rst2md(text, gallery_conf, target_dir, heading_levels, real_func):
+    """Monkey-patch rst2md to support comments and some include directives.
+
+    Currently, only include directives without any parameters are supported. Also, note that in
+    reStructuredText any unrecognized explicit markup block is treated as a comment (see
+    https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#comments).
+
+    For callouts, we only replace generic "admonition" directives. All others should be replaced by
+    sphinx-gallery's rst2md. Note that the "alert" and "alert-info" tags are supported in most IPython
+    notebooks, but they render kinda funky on Colab.
+    """
+
+    def load_include(match):
+        full_path = os.path.join(target_dir, match.group(2))
+        with open(full_path) as f:
+            lines = f.read()
+        indented = indent(lines, match.group(1)) + "\n"
+        return indented
+
+    text = re.sub(INCLUDE_DIRECTIVE_RE, load_include, text)
+
+    # Replace generic, titled admonitions with indented text. Other admonitions (e.g. .. note::)
+    # will be handled by sphinx-gallery's rst2md.
+    def rewrite_generic_admonition(match):
+        title, text = match.groups()
+        stripped_text = dedent(text).strip()
+        return f'<div class="alert alert-info"><h4>{title}</h4><p>{stripped_text}</p></div>'
+
+    text = re.sub(ADMONITION_DIRECTIVE_RE, rewrite_generic_admonition, text)
+
+    # Call the real function, and then strip any remaining directives (i.e. comments)
+    text = real_func(text, gallery_conf, target_dir, heading_levels)
+    text = re.sub(COMMENT_DIRECTIVE_RE, "", text)
+    return text
+
+
+INSTALL_TVM_DEV = f"""\
+%%shell
+# Installs the latest dev build of TVM from PyPI. If you wish to build
+# from source, see https://tvm.apache.org/docs/install/from_source.html
+pip install apache-tvm --pre"""
+
+INSTALL_TVM_FIXED = f"""\
+%%shell
+# Installs TVM version {version} from PyPI. If you wish to build
+# from source, see https://tvm.apache.org/docs/install/from_source.html
+pip install apache-tvm=={version}"""
+
+INSTALL_TVM_CUDA_DEV = f"""\
+%%shell
+# Installs the latest dev build of TVM from PyPI, with CUDA enabled. To use this,
+# you must request a Google Colab instance with a GPU by going to Runtime ->
+# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
+# source, see https://tvm.apache.org/docs/install/from_source.html
+pip install tlcpack-nightly-cu113 --pre -f https://tlcpack.ai/wheels"""
+
+INSTALL_TVM_CUDA_FIXED = f"""\
+%%shell
+# Installs TVM version {version} from PyPI, with CUDA enabled. To use this,
+# you must request a Google Colab instance with a GPU by going to Runtime ->
+# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
+# source, see https://tvm.apache.org/docs/install/from_source.html
+pip install apache-tvm-cu113=={version} -f https://tlcpack.ai/wheels"""
+
+
+@monkey_patch("sphinx_gallery.gen_rst", "jupyter_notebook")
+def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func):
+    """Monkey-patch sphinx-gallery to add a TVM import block to each IPython notebook.
+
+    If we had only one import block, we could skip the patching and just set first_notebook_cell.
+    However, how we import TVM depends on if we are using a fixed or dev version, and whether we
+    will use the GPU.
+
+    Tutorials requiring a CUDA-enabled build of TVM should use the flag:
+    # sphinx_gallery_requires_cuda = True
+    """
+
+    requires_cuda = CURRENT_FILE_CONF.get("requires_cuda", False)
+    fixed_version = not "dev" in version
+
+    if fixed_version and requires_cuda:
+        install_block = INSTALL_TVM_CUDA_FIXED
+    elif fixed_version and not requires_cuda:
+        install_block = INSTALL_TVM_FIXED
+    elif not fixed_version and requires_cuda:
+        install_block = INSTALL_TVM_CUDA_DEV
+    else:
+        install_block = INSTALL_TVM_DEV
+
+    new_conf = {**gallery_conf, "first_notebook_cell": install_block}
+    return real_func(script_blocks, new_conf, target_dir)
+
+
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
 extensions = [
@@ -506,6 +700,16 @@ def process_docstring(app, what, name, obj, options, lines):
 from legacy_redirect import build_legacy_redirect
 
 
+def strip_ipython_magic(app, docname, source):
+    """Prevents IPython magic commands from being rendered in HTML files.
+
+    TODO rework this function to remove IPython magic commands from include directives too.
+    """
+    for i in range(len(source)):
+        source[i] = re.sub(r"%%.*\n\s*", "", source[i])
+
+
 def setup(app):
+    app.connect("source-read", strip_ipython_magic)
     app.connect("autodoc-process-docstring", process_docstring)
     app.connect("build-finished", build_legacy_redirect(tvm_path))
diff --git a/gallery/how_to/compile_models/from_coreml.py b/gallery/how_to/compile_models/from_coreml.py
index 96d2967947..4d0eea2d8d 100644
--- a/gallery/how_to/compile_models/from_coreml.py
+++ b/gallery/how_to/compile_models/from_coreml.py
@@ -23,13 +23,12 @@ Compile CoreML Models
 
 This article is an introductory tutorial to deploy CoreML models with Relay.
 
-For us to begin with, coremltools module is required to be installed.
-
-A quick solution is to install via pip
+To begin, we must install coremltools:
 
 .. code-block:: bash
 
-    pip install -U coremltools --user
+    %%shell
+    pip install coremltools
 
 or please refer to official site
 https://github.com/apple/coremltools
diff --git a/gallery/how_to/compile_models/from_darknet.py b/gallery/how_to/compile_models/from_darknet.py
index c12a9e7e15..8397efa63b 100644
--- a/gallery/how_to/compile_models/from_darknet.py
+++ b/gallery/how_to/compile_models/from_darknet.py
@@ -27,8 +27,9 @@ Please install CFFI and CV2 before executing this script
 
 .. code-block:: bash
 
-  pip install cffi
-  pip install opencv-python
+  %%shell
+  pip install cffi opencv-python
+
 """
 
 # sphinx_gallery_start_ignore
diff --git a/gallery/how_to/compile_models/from_keras.py b/gallery/how_to/compile_models/from_keras.py
index 895a601ada..ac961ca16a 100644
--- a/gallery/how_to/compile_models/from_keras.py
+++ b/gallery/how_to/compile_models/from_keras.py
@@ -19,7 +19,7 @@ Compile Keras Models
 =====================
 **Author**: `Yuwei Hu <https://Huyuwei.github.io/>`_
 
-This article is an introductory tutorial to deploy keras models with Relay.
+This article is an introductory tutorial to deploy Keras models with Relay.
 
 For us to begin with, keras should be installed.
 Tensorflow is also required since it's used as the default backend of keras.
@@ -28,14 +28,15 @@ A quick solution is to install via pip
 
 .. code-block:: bash
 
-    pip install -U keras --user
-    pip install -U tensorflow --user
+    %%shell
+    pip install keras tensorflow
 
 or please refer to official site
 https://keras.io/#installation
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/compile_models/from_mxnet.py b/gallery/how_to/compile_models/from_mxnet.py
index 3808461862..cfd66ecdb7 100644
--- a/gallery/how_to/compile_models/from_mxnet.py
+++ b/gallery/how_to/compile_models/from_mxnet.py
@@ -22,21 +22,19 @@ Compile MXNet Models
 **Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
             `Kazutaka Morita <https://github.com/kazum>`_
 
-This article is an introductory tutorial to deploy mxnet models with Relay.
-
-For us to begin with, mxnet module is required to be installed.
-
-A quick solution is
+This article is an introductory tutorial to deploy mxnet models with Relay. To begin, we must install `mxnet`:
 
 .. code-block:: bash
 
-    pip install mxnet --user
+    %%shell
+    pip install mxnet
 
 or please refer to official installation guide.
 https://mxnet.apache.org/versions/master/install/index.html
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/compile_models/from_oneflow.py b/gallery/how_to/compile_models/from_oneflow.py
index eb27c4b3e3..0925c9fe81 100644
--- a/gallery/how_to/compile_models/from_oneflow.py
+++ b/gallery/how_to/compile_models/from_oneflow.py
@@ -27,8 +27,9 @@ A quick solution is to install via pip
 
 .. code-block:: bash
 
+    %%shell
     pip install flowvision==0.1.0
-    python3 -m pip install -f https://release.oneflow.info oneflow==0.7.0+cpu
+    pip install -f https://release.oneflow.info oneflow==0.7.0+cpu
 
 or please refer to official site:
 https://github.com/Oneflow-Inc/oneflow
@@ -37,6 +38,7 @@ Currently, TVM supports OneFlow 0.7.0. Other versions may be unstable.
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/compile_models/from_onnx.py b/gallery/how_to/compile_models/from_onnx.py
index f0256bc7d3..980091d391 100644
--- a/gallery/how_to/compile_models/from_onnx.py
+++ b/gallery/how_to/compile_models/from_onnx.py
@@ -21,15 +21,14 @@ Compile ONNX Models
 
 This article is an introductory tutorial to deploy ONNX models with Relay.
 
-For us to begin with, ONNX package must be installed.
-
-A quick solution is to install protobuf compiler, and
+To begin, install the ONNX package:
 
 .. code-block:: bash
 
-    pip install --user onnx onnxoptimizer
+    %%shell
+    pip install onnx onnxoptimizer
 
-or please refer to official site.
+Alternatively, you can refer to the official site:
 https://github.com/onnx/onnx
 """
 
diff --git a/gallery/how_to/compile_models/from_paddle.py b/gallery/how_to/compile_models/from_paddle.py
index fecb1c48da..199547b814 100644
--- a/gallery/how_to/compile_models/from_paddle.py
+++ b/gallery/how_to/compile_models/from_paddle.py
@@ -20,14 +20,14 @@ Compile PaddlePaddle Models
 **Author**: `Ziyuan Ma <https://github.com/ZiyuanMa/>`_
 
 This article is an introductory tutorial to deploy PaddlePaddle models with Relay.
-For us to begin with, PaddlePaddle>=2.1.3 is required to be installed.
-A quick solution is
+To begin, we'll install PaddlePaddle>=2.1.3:
 
 .. code-block:: bash
 
+    %%shell
     pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
 
-or please refer to official site.
+For more details, refer to the official install instructions at:
 https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html
 """
 
diff --git a/gallery/how_to/compile_models/from_pytorch.py b/gallery/how_to/compile_models/from_pytorch.py
index 98b531fa6d..064ed70e46 100644
--- a/gallery/how_to/compile_models/from_pytorch.py
+++ b/gallery/how_to/compile_models/from_pytorch.py
@@ -21,15 +21,15 @@ Compile PyTorch Models
 
 This article is an introductory tutorial to deploy PyTorch models with Relay.
 
-For us to begin with, PyTorch should be installed.
-TorchVision is also required since we will be using it as our model zoo.
-
-A quick solution is to install via pip
+For us to begin, PyTorch should be installed.
+TorchVision is also required so we can use the model zoo.
+A quick solution is to install via pip:
 
 .. code-block:: bash
 
-    pip install torch==1.7.0
-    pip install torchvision==0.8.1
+    %%shell
+    pip install torch
+    pip install torchvision
 
 or please refer to official site
 https://pytorch.org/get-started/locally/
diff --git a/gallery/how_to/compile_models/from_tensorflow.py b/gallery/how_to/compile_models/from_tensorflow.py
index 9a32397815..b85b9e669a 100644
--- a/gallery/how_to/compile_models/from_tensorflow.py
+++ b/gallery/how_to/compile_models/from_tensorflow.py
@@ -21,6 +21,11 @@ This article is an introductory tutorial to deploy tensorflow models with TVM.
 
 For us to begin with, tensorflow python module is required to be installed.
 
+.. code-block:: bash
+
+    %%shell
+    pip install tensorflow
+
 Please refer to https://www.tensorflow.org/install
 """
 
diff --git a/gallery/how_to/compile_models/from_tflite.py b/gallery/how_to/compile_models/from_tflite.py
index d1b78f11d5..a248346c29 100644
--- a/gallery/how_to/compile_models/from_tflite.py
+++ b/gallery/how_to/compile_models/from_tflite.py
@@ -25,9 +25,8 @@ To get started, TFLite package needs to be installed as prerequisite.
 
 .. code-block:: bash
 
-    # install tflite
-    pip install tflite==2.1.0 --user
-
+    %%shell
+    pip install tflite==2.1.0
 
 or you could generate TFLite package yourself. The steps are the following:
 
diff --git a/gallery/how_to/deploy_models/deploy_model_on_adreno.py b/gallery/how_to/deploy_models/deploy_model_on_adreno.py
index d6ed1f1f99..8d25e50b56 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_adreno.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_adreno.py
@@ -31,6 +31,7 @@ A quick solution is to install it via pip:
 
 .. code-block:: bash
 
+  %%shell
   pip install torch
   pip install torchvision
 
diff --git a/gallery/how_to/deploy_models/deploy_model_on_nano.py b/gallery/how_to/deploy_models/deploy_model_on_nano.py
index 5e59dccf20..3d8a4a796f 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_nano.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_nano.py
@@ -26,6 +26,7 @@ it on Jetson Nano.
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
index 0d8d0f2867..ffde042e2b 100644
--- a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
+++ b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
@@ -27,8 +27,8 @@ A quick solution is to install via pip
 
 .. code-block:: bash
 
-    pip install torch==1.7.0
-    pip install torchvision==0.8.1
+    pip install torch
+    pip install torchvision
 
 or please refer to official site
 https://pytorch.org/get-started/locally/
diff --git a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
index 479269a224..bbd207dbac 100644
--- a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
+++ b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
@@ -47,7 +47,7 @@ Since we do not use any 3rdparty library, there is no setup needed.
 
 If you would like to try this with your own datatype library, first bring the library's functions into the process space with ``CDLL``:
 
-.. code-block :: python
+.. code-block:: python
 
     ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
 """
diff --git a/gallery/how_to/optimize_operators/opt_conv_cuda.py b/gallery/how_to/optimize_operators/opt_conv_cuda.py
index e5b452af66..33e5d98553 100644
--- a/gallery/how_to/optimize_operators/opt_conv_cuda.py
+++ b/gallery/how_to/optimize_operators/opt_conv_cuda.py
@@ -31,6 +31,7 @@ channel, batch.
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py
index 8db20b9b9b..5734f064f0 100644
--- a/gallery/how_to/optimize_operators/opt_conv_tensorcore.py
+++ b/gallery/how_to/optimize_operators/opt_conv_tensorcore.py
@@ -52,6 +52,7 @@ convolution has a large batch. We strongly recommend covering the :ref:`opt-conv
 # NHWCnc memory layout.The following code defines the convolution algorithm in TVM.
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py
index 5d173e3812..7964694e68 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.py
@@ -38,6 +38,7 @@ __name__ == "__main__":` block.
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
index 4560cf881e..a73b97525f 100644
--- a/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
+++ b/gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
@@ -49,6 +49,7 @@ __name__ == "__main__":` block.
 # Now return to python code. Import packages.
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
index 4cf397e256..7cb6cb8dd3 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
@@ -60,6 +60,7 @@ __name__ == "__main__":` block.
 # Now return to python code. Import packages.
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/work_with_microtvm/install_cmsis.rst b/gallery/how_to/work_with_microtvm/install_cmsis.rst
new file mode 100644
index 0000000000..2f1d2fb118
--- /dev/null
+++ b/gallery/how_to/work_with_microtvm/install_cmsis.rst
@@ -0,0 +1,35 @@
+..  Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+..  Boilerplate script for installing CMSIS-NN in the microTVM
+    tutorials that use it. Does not show up as a separate file
+    on the documentation website.
+
+Install CMSIS-NN
+----------------------------
+
+    .. code-block:: bash
+
+        %%shell
+        CMSIS_SHA="51263182d16c92649a48144ba56c0945f9fce60e"
+        CMSIS_URL="http://github.com/ARM-software/CMSIS_5/archive/${CMSIS_SHA}.tar.gz"
+        export CMSIS_PATH=/content/cmsis
+        DOWNLOAD_PATH="/content/${CMSIS_SHA}.tar.gz"
+        mkdir ${CMSIS_PATH}
+        wget ${CMSIS_URL} -O "${DOWNLOAD_PATH}"
+        tar -xf "${DOWNLOAD_PATH}" -C ${CMSIS_PATH} --strip-components=1
+        rm ${DOWNLOAD_PATH}
diff --git a/gallery/how_to/work_with_microtvm/install_dependencies.rst b/gallery/how_to/work_with_microtvm/install_dependencies.rst
new file mode 100644
index 0000000000..d1bee4176d
--- /dev/null
+++ b/gallery/how_to/work_with_microtvm/install_dependencies.rst
@@ -0,0 +1,33 @@
+..  Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+..  Boilerplate script for installing dependencies in the microTVM
+    tutorials that use it. Does not show up as a separate file
+    on the documentation website.
+
+
+Install microTVM Python dependencies
+------------------------------------
+
+TVM does not include a package for Python serial communication, so
+we must install one before using microTVM. We will also need TFLite
+to load models.
+
+    .. code-block:: bash
+
+        %%shell
+        pip install pyserial==3.5 tflite==2.1
diff --git a/gallery/how_to/work_with_microtvm/install_zephyr.rst b/gallery/how_to/work_with_microtvm/install_zephyr.rst
new file mode 100644
index 0000000000..a4c412f0f7
--- /dev/null
+++ b/gallery/how_to/work_with_microtvm/install_zephyr.rst
@@ -0,0 +1,52 @@
+..  Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+..  Boilerplate script for installing Zephyr in the microTVM
+    tutorials that use it. Does not show up as a separate file
+    on the documentation website.
+
+Install Zephyr
+----------------------------
+
+    .. code-block:: bash
+
+        %%shell
+        # Install west and ninja
+        python3 -m pip install west
+        apt-get install -y ninja-build
+
+        # Install ZephyrProject
+        ZEPHYR_PROJECT_PATH="/content/zephyrproject"
+        export ZEPHYR_BASE=${ZEPHYR_PROJECT_PATH}/zephyr
+        west init ${ZEPHYR_PROJECT_PATH}
+        cd ${ZEPHYR_BASE}
+        git checkout v2.7-branch
+        cd ..
+        west update
+        west zephyr-export
+        chmod -R o+w ${ZEPHYR_PROJECT_PATH}
+
+        # Install Zephyr SDK
+        ZEPHYR_SDK_VERSION=0.13.2
+        ZEPHYR_SDK_FILE="/content/zephyr-sdk-linux-setup.run"
+        wget --no-verbose -O $ZEPHYR_SDK_FILE \
+            https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${ZEPHYR_SDK_VERSION}/zephyr-sdk-${ZEPHYR_SDK_VERSION}-linux-x86_64-setup.run
+        chmod +x $ZEPHYR_SDK_FILE
+        "$ZEPHYR_SDK_FILE" -- -d /content/zephyr-sdk --quiet
+
+        # Install python dependencies
+        python3 -m pip install -r "${ZEPHYR_BASE}/scripts/requirements.txt"
diff --git a/gallery/how_to/work_with_microtvm/micro_aot.py b/gallery/how_to/work_with_microtvm/micro_aot.py
index 4d6890f8d9..8646b6d7ec 100644
--- a/gallery/how_to/work_with_microtvm/micro_aot.py
+++ b/gallery/how_to/work_with_microtvm/micro_aot.py
@@ -30,16 +30,42 @@ of time compilation. This tutorial can be executed on a x86 CPU using C runtime
 or on Zephyr platform on a microcontroller/board supported by Zephyr.
 """
 
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst
+#
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
+import os
+
+# By default, this tutorial runs on x86 CPU using TVM's C runtime. If you would like
+# to run on real Zephyr hardware, you must export the `TVM_MICRO_USE_HW` environment
+# variable. Otherwise (if you are using the C runtime), you can skip installing
+# Zephyr and CMSIS-NN. It takes ~20 minutes to install both of them.
+use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
+
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst
+#
+
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_cmsis.rst
+#
+
+######################################################################
+# Import Python dependencies
+# -------------------------------
+#
 import numpy as np
 import pathlib
 import json
-import os
 
 import tvm
 from tvm import relay
@@ -57,7 +83,6 @@ from tvm.contrib.download import download_testdata
 # **Note:** By default this tutorial runs on x86 CPU using CRT, if you would like to run on Zephyr platform
 # you need to export `TVM_MICRO_USE_HW` environment variable.
 #
-use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
 MODEL_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite"
 MODEL_PATH = download_testdata(MODEL_URL, "keyword_spotting_quant.tflite", module="model")
 SAMPLE_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy"
@@ -139,6 +164,8 @@ if use_physical_hw:
         "board": BOARD,
         "serial_number": SERIAL,
         "config_main_stack_size": 4096,
+        "cmsis_path": os.getenv("CMSIS_PATH", default="/content/cmsis"),
+        "zephyr_base": os.getenv("ZEPHYR_BASE", default="/content/zephyrproject/zephyr"),
     }
 
 temp_dir = tvm.contrib.utils.tempdir()
diff --git a/gallery/how_to/work_with_microtvm/micro_autotune.py b/gallery/how_to/work_with_microtvm/micro_autotune.py
index 13bf4efac1..3dd4cab6c9 100644
--- a/gallery/how_to/work_with_microtvm/micro_autotune.py
+++ b/gallery/how_to/work_with_microtvm/micro_autotune.py
@@ -27,13 +27,37 @@ Autotuning with microTVM
 This tutorial explains how to autotune a model using the C runtime.
 """
 
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst
+#
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
+# You can skip the following two sections (installing Zephyr and CMSIS-NN) if the following flag is False.
+# Installing Zephyr takes ~20 min.
 import os
+
+use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
+
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst
+#
+
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_cmsis.rst
+#
+
+######################################################################
+# Import Python dependencies
+# -------------------------------
+#
 import json
 import numpy as np
 import pathlib
@@ -41,8 +65,6 @@ import pathlib
 import tvm
 from tvm.relay.backend import Runtime
 
-use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
-
 ####################
 # Defining the model
 ####################
diff --git a/gallery/how_to/work_with_microtvm/micro_pytorch.py b/gallery/how_to/work_with_microtvm/micro_pytorch.py
index cd4af05fb5..f7f0c9209a 100644
--- a/gallery/how_to/work_with_microtvm/micro_pytorch.py
+++ b/gallery/how_to/work_with_microtvm/micro_pytorch.py
@@ -29,6 +29,11 @@ a PyTorch model. This tutorial can be executed on a x86 CPU using C runtime (CRT
 since the model would not fit on our current supported Zephyr boards.
 """
 
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst
+#
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -36,7 +41,6 @@ testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
 import pathlib
-
 import torch
 import torchvision
 from torchvision import transforms
diff --git a/gallery/how_to/work_with_microtvm/micro_tflite.py b/gallery/how_to/work_with_microtvm/micro_tflite.py
index 5822a1a1e9..cbdf6cd6f4 100644
--- a/gallery/how_to/work_with_microtvm/micro_tflite.py
+++ b/gallery/how_to/work_with_microtvm/micro_tflite.py
@@ -26,101 +26,9 @@ model with Relay.
 """
 
 ######################################################################
-# .. note::
-#     If you want to run this tutorial on the microTVM Reference VM, download the Jupyter
-#     notebook using the link at the bottom of this page and save it into the TVM directory. Then:
 #
-#     #. Login to the reference VM with a modified ``vagrant ssh`` command:
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_dependencies.rst
 #
-#         ``$ vagrant ssh -- -L8888:localhost:8888``
-#
-#     #. Install jupyter:  ``pip install jupyterlab``
-#     #. ``cd`` to the TVM directory.
-#     #. Install tflite: poetry install -E importer-tflite
-#     #. Launch Jupyter Notebook: ``jupyter notebook``
-#     #. Copy the localhost URL displayed, and paste it into your browser.
-#     #. Navigate to saved Jupyter Notebook (``.ipynb`` file).
-#
-#
-# Setup
-# -----
-#
-# Install TFLite
-# ^^^^^^^^^^^^^^
-#
-# To get started, TFLite package needs to be installed as prerequisite. You can do this in two ways:
-#
-# 1. Install tflite with ``pip``
-#
-#     .. code-block:: bash
-#
-#       pip install tflite=2.1.0 --user
-#
-# 2. Generate the TFLite package yourself. The steps are the following:
-#
-#     Get the flatc compiler.
-#     Please refer to https://github.com/google/flatbuffers for details
-#     and make sure it is properly installed.
-#
-#     .. code-block:: bash
-#
-#       flatc --version
-#
-#     Get the TFLite schema.
-#
-#     .. code-block:: bash
-#
-#       wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs
-#
-#     Generate TFLite package.
-#
-#     .. code-block:: bash
-#
-#       flatc --python schema.fbs
-#
-#     Add the current folder (which contains generated tflite module) to PYTHONPATH.
-#
-#     .. code-block:: bash
-#
-#       export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)
-#
-# To validate that the TFLite package was installed successfully, ``python -c "import tflite"``
-#
-# Install Zephyr (physical hardware only)
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-#
-# When running this tutorial with a host simulation (the default), you can use the host ``gcc`` to
-# build a firmware image that simulates the device. When compiling to run on physical hardware, you
-# need to install a *toolchain* plus some target-specific dependencies. microTVM allows you to
-# supply any compiler and runtime that can launch the TVM RPC server, but to get started, this
-# tutorial relies on the Zephyr RTOS to provide these pieces.
-#
-# You can install Zephyr by following the
-# `Installation Instructions <https://docs.zephyrproject.org/latest/getting_started/index.html>`_.
-#
-# Aside: Recreating your own Pre-Trained TFLite model
-#  The tutorial downloads a pretrained TFLite model. When working with microcontrollers
-#  you need to be mindful these are highly resource constrained devices as such standard
-#  models like MobileNet may not fit into their modest memory.
-#
-#  For this tutorial, we'll make use of one of the TF Micro example models.
-#
-#  If you wish to replicate the training steps see:
-#  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/hello_world/train
-#
-#    .. note::
-#
-#      If you accidentally download the example pretrained model from:
-#
-#      ``wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/micro/hello_world_2020_04_13.zip``
-#
-#      this will fail due to an unimplemented opcode (114)
-#
-# Load and prepare the Pre-Trained Model
-# --------------------------------------
-#
-# Load the pretrained TFLite model from a file in your current
-# directory into a buffer
 
 # sphinx_gallery_start_ignore
 from tvm import testing
@@ -129,6 +37,27 @@ testing.utils.install_request_hook(depth=3)
 # sphinx_gallery_end_ignore
 
 import os
+
+# By default, this tutorial runs on x86 CPU using TVM's C runtime. If you would like
+# to run on real Zephyr hardware, you must export the `TVM_MICRO_USE_HW` environment
+# variable. Otherwise (if you are using the C runtime), you can skip installing
+# Zephyr and CMSIS-NN. It takes ~20 minutes to install both of them.
+use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
+
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_zephyr.rst
+#
+
+######################################################################
+#
+#     .. include:: ../../../../gallery/how_to/work_with_microtvm/install_cmsis.rst
+#
+
+######################################################################
+# Import Python dependencies
+# -------------------------------
+#
 import json
 import tarfile
 import pathlib
@@ -140,7 +69,6 @@ from tvm import relay
 import tvm.contrib.utils
 from tvm.contrib.download import download_testdata
 
-use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW"))
 model_url = "https://people.linaro.org/~tom.gall/sine_model.tflite"
 model_file = "sine_model.tflite"
 model_path = download_testdata(model_url, model_file, module="data")
@@ -207,8 +135,7 @@ if use_physical_hw:
     boards_file = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
     with open(boards_file) as f:
         boards = json.load(f)
-
-    BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg")
+    BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi")
     SERIAL = os.getenv("TVM_MICRO_SERIAL", default=None)
     TARGET = tvm.target.target.micro(boards[BOARD]["model"])
 
@@ -292,7 +219,14 @@ project_options = {}  # You can use options to provide platform-specific options
 
 if use_physical_hw:
     template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
-    project_options = {"project_type": "host_driven", "board": BOARD, "serial_number": SERIAL}
+    project_options = {
+        "project_type": "host_driven",
+        "board": BOARD,
+        "serial_number": SERIAL,
+        "config_main_stack_size": 4096,
+        "cmsis_path": os.getenv("CMSIS_PATH", default="/content/cmsis"),
+        "zephyr_base": os.getenv("ZEPHYR_BASE", default="/content/zephyrproject/zephyr"),
+    }
 
 # Create a temporary directory
 
diff --git a/gallery/how_to/work_with_microtvm/micro_train.py b/gallery/how_to/work_with_microtvm/micro_train.py
index 44e0dd5cb7..9b8a9a68dd 100644
--- a/gallery/how_to/work_with_microtvm/micro_train.py
+++ b/gallery/how_to/work_with_microtvm/micro_train.py
@@ -27,17 +27,6 @@ deployed to Arduino using TVM.
 """
 
 ######################################################################
-# .. note::
-#
-#   This tutorial is best viewed as a Jupyter Notebook. You can download and run it locally
-#   using the link at the bottom of this page, or open it online for free using Google Colab.
-#   Click the icon below to open in Google Colab.
-#
-# .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/images/utilities/colab_button.png
-#      :align: center
-#      :target: https://colab.research.google.com/github/apache/tvm-site/blob/asf-site/docs/_downloads/a7c7ea4b5017ae70db1f51dd8e6dcd82/micro_train.ipynb
-#      :width: 300px
-#
 # Motivation
 # ----------
 # When building IOT devices, we often want them to **see and understand** the world around them.
@@ -71,7 +60,7 @@ deployed to Arduino using TVM.
 #
 #     .. code-block:: bash
 #
-#       %%bash
+#       %%shell
 #       pip install -q tensorflow tflite
 #       pip install -q tlcpack-nightly -f https://tlcpack.ai/wheels
 #       apt-get -qq install imagemagick curl
@@ -515,7 +504,7 @@ arduino_project = tvm.micro.generate_project(
 #
 #     .. code-block:: bash
 #
-#       %%bash
+#       %%shell
 #       mkdir -p ~/tests
 #       curl "https://i.imgur.com/JBbEhxN.png" -o ~/tests/car_224.png
 #       convert ~/tests/car_224.png -resize 64 ~/tests/car_64.png
diff --git a/gallery/how_to/work_with_pytorch/using_as_torch.py b/gallery/how_to/work_with_pytorch/using_as_torch.py
index 3528e754fd..e2351a0d7c 100644
--- a/gallery/how_to/work_with_pytorch/using_as_torch.py
+++ b/gallery/how_to/work_with_pytorch/using_as_torch.py
@@ -22,8 +22,16 @@ Wrap Your TVMScript as PyTorch Module
 
 This article is a tutorial on wrapping the TVMScript code as the PyTorch module.
 Using the decorator `as_torch`, users can wrap TVMScript code into a PyTorch nn.Module naturally.
+To follow the tutorial, PyTorch should be installed:
+
+.. code-block:: bash
+
+    %%shell
+    pip install torch
+
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
diff --git a/gallery/how_to/work_with_pytorch/using_optimized_torch.py b/gallery/how_to/work_with_pytorch/using_optimized_torch.py
index dc6caf5d59..baf80541b9 100644
--- a/gallery/how_to/work_with_pytorch/using_optimized_torch.py
+++ b/gallery/how_to/work_with_pytorch/using_optimized_torch.py
@@ -21,10 +21,18 @@ Compile PyTorch Models
 `Yaoda Zhou <https://github.com/juda>`_
 
 This article is a tutorial to optimize PyTorch models by using decorator `optimize_torch`.
-To follow this tutorial, PyTorch, as well as TorchVision, should be installed.
+To follow this tutorial, PyTorch, as well as TorchVision, should be installed:
+
+.. code-block:: bash
+
+    %%shell
+    pip install torch
+    pip install torchvision
+
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/work_with_relay/build_gcn.py b/gallery/how_to/work_with_relay/build_gcn.py
index 8953ffc2e4..e6106dd95b 100644
--- a/gallery/how_to/work_with_relay/build_gcn.py
+++ b/gallery/how_to/work_with_relay/build_gcn.py
@@ -25,7 +25,13 @@ In this tutorial, we will run our GCN on Cora dataset to demonstrate.
 Cora dataset is a common benchmark for Graph Neural Networks (GNN) and frameworks that support GNN training and inference.
 We directly load the dataset from DGL library to do the apples to apples comparison against DGL.
 
-Please refer to DGL doc for DGL installation at
+.. code-block:: bash
+
+    %%shell
+    pip install torch==1.9.0
+    pip install dgl==v0.7.2 -f https://data.dgl.ai/wheels/repo.html
+
+Please refer to DGL doc for installation at
 https://docs.dgl.ai/install/index.html.
 
 Please refer to PyTorch guide for PyTorch installation at
diff --git a/gallery/how_to/work_with_relay/using_relay_viz.py b/gallery/how_to/work_with_relay/using_relay_viz.py
index 2e68ce9028..ae22fe20e1 100644
--- a/gallery/how_to/work_with_relay/using_relay_viz.py
+++ b/gallery/how_to/work_with_relay/using_relay_viz.py
@@ -32,6 +32,13 @@ A default parser is provided. Users can implement their own renderers to render
 Here we use a renderer rendering graph in the text-form.
 It is a lightweight, AST-like visualizer, inspired by `clang ast-dump <https://clang.llvm.org/docs/IntroductionToTheClangAST.html>`_.
 We will introduce how to implement customized parsers and renderers through interface classes.
+To install dependencies, run:
+
+.. code-block:: bash
+
+    %%shell
+    pip install graphviz
+
 
 For more details, please refer to :py:mod:`tvm.contrib.relay_viz`.
 """
diff --git a/gallery/how_to/work_with_schedules/reduction.py b/gallery/how_to/work_with_schedules/reduction.py
index 432e9cd143..c084c45d38 100644
--- a/gallery/how_to/work_with_schedules/reduction.py
+++ b/gallery/how_to/work_with_schedules/reduction.py
@@ -29,6 +29,7 @@ from __future__ import absolute_import, print_function
 
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/how_to/work_with_schedules/scan.py b/gallery/how_to/work_with_schedules/scan.py
index d21673acd9..d523d5b995 100644
--- a/gallery/how_to/work_with_schedules/scan.py
+++ b/gallery/how_to/work_with_schedules/scan.py
@@ -26,6 +26,7 @@ from __future__ import absolute_import, print_function
 
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/tutorial/intro_topi.py b/gallery/tutorial/intro_topi.py
index e10a74c849..f2a4db6086 100644
--- a/gallery/tutorial/intro_topi.py
+++ b/gallery/tutorial/intro_topi.py
@@ -27,6 +27,7 @@ In this tutorial, we will see how TOPI can save us from writing boilerplate code
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/tutorial/relay_quick_start.py b/gallery/tutorial/relay_quick_start.py
index 8910817c21..e59f0107f9 100644
--- a/gallery/tutorial/relay_quick_start.py
+++ b/gallery/tutorial/relay_quick_start.py
@@ -27,6 +27,7 @@ Notice that you need to build TVM with cuda and llvm enabled.
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/gallery/tutorial/tensor_ir_blitz_course.py b/gallery/tutorial/tensor_ir_blitz_course.py
index a62fa39793..dc75a3fb94 100644
--- a/gallery/tutorial/tensor_ir_blitz_course.py
+++ b/gallery/tutorial/tensor_ir_blitz_course.py
@@ -30,6 +30,7 @@ TensorIR is a domain specific language for deep learning programs serving two br
 """
 
 # sphinx_gallery_start_ignore
+# sphinx_gallery_requires_cuda = True
 from tvm import testing
 
 testing.utils.install_request_hook(depth=3)
diff --git a/tests/lint/check_request_hook.py b/tests/lint/check_request_hook.py
index 35b1a85c3a..925af5597c 100644
--- a/tests/lint/check_request_hook.py
+++ b/tests/lint/check_request_hook.py
@@ -23,13 +23,27 @@ from typing import List, Optional
 
 
 REPO_ROOT = Path(__file__).resolve().parent.parent.parent
-EXPECTED = """
+
+EXPECTED_HOOK = """
 # sphinx_gallery_start_ignore
 from tvm import testing
 
-testing.utils.install_request_hook(depth=3)
+testing.utils.install_request_hook(depth=3)\
 # sphinx_gallery_end_ignore
-""".rstrip()
+"""
+
+# Extra sphinx-gallery config options may be passed inside the ignore block before the hook. This
+# is a workaround that can be removed once sphinx-gallery #1059 merges and the version is updated.
+EXPECTED_REGEX = re.compile(
+    r"""
+\# sphinx_gallery_start_ignore
+(?:.*\n)*from tvm import testing
+
+testing\.utils\.install_request_hook\(depth=3\)\
+\# sphinx_gallery_end_ignore
+""".rstrip(),
+    re.MULTILINE,
+)
 IGNORE_PATTERNS = ["*/micro_tvmc.py", "*/micro_train.py"]
 APACHE_HEADER_LINES = 16
 
@@ -84,14 +98,13 @@ if __name__ == "__main__":
         with open(file) as f:
             content = f.read()
 
-        if EXPECTED not in content:
+        regex_match = EXPECTED_REGEX.search(content)
+        if not regex_match:
             errors.append((file, None))
             continue
 
-        index = content.index(EXPECTED)
-        line = content.count("\n", 0, index) + EXPECTED.count("\n") + 2
+        line = content.count("\n", 0, regex_match.end()) + 2
         expected = find_code_block_line(content.split("\n"))
-
         if expected is not None and line < expected:
             errors.append((file, (line, expected)))
 
@@ -106,19 +119,19 @@ if __name__ == "__main__":
             if "from __future__" in content:
                 # Place after the last __future__ import
                 new_content = re.sub(
-                    r"((?:from __future__.*?\n)+)", r"\1\n" + EXPECTED, content, flags=re.MULTILINE
+                    r"((?:from __future__.*?\n)+)", r"\1\n" + EXPECTED_HOOK, content, flags=re.M
                 )
             else:
                 # Place in the first codeblock
                 lines = content.split("\n")
                 position = find_code_block_line(lines)
                 if position is None:
-                    new_content = "\n".join(lines) + EXPECTED + "\n"
+                    new_content = "\n".join(lines) + EXPECTED_HOOK + "\n"
                 else:
                     print(position)
                     new_content = (
                         "\n".join(lines[:position])
-                        + EXPECTED
+                        + EXPECTED_HOOK
                         + "\n\n"
                         + "\n".join(lines[position:])
                     )
@@ -134,7 +147,7 @@ if __name__ == "__main__":
                 "the whitespace is incorrect.\n"
                 "You can run 'python3 tests/lint/check_request_hook.py --fix' to "
                 "automatically fix these errors:\n"
-                f"{EXPECTED}\n\nFiles:"
+                f"{EXPECTED_HOOK}\n\nFiles:"
             )
             for file, line_info in errors:
                 if line_info is None: