You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/09/10 18:02:20 UTC

[GitHub] [incubator-tvm] comaniac commented on a change in pull request #6302: [tvmc] command line driver 'compile' (part 2/4)

comaniac commented on a change in pull request #6302:
URL: https://github.com/apache/incubator-tvm/pull/6302#discussion_r486517802



##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
+@register_parser
+def add_compile_parser(subparsers):
+    """ Include parser for 'compile' subcommand """
+
+    parser = subparsers.add_parser("compile", help="compile a model")
+    parser.set_defaults(func=drive_compile)
+    parser.add_argument(
+        "--cross-compiler",
+        default="",
+        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
+    )
+    parser.add_argument(
+        "--desired-layout",
+        choices=["NCHW", "NHWC"],
+        default=None,
+        help="change the data layout of the whole graph",
+    )
+    parser.add_argument(
+        "--dump-code",
+        metavar="FORMAT",
+        default="",
+        help="comma separarated list of formats to export, e.g. 'asm,ll,relay' "
+    )
+    parser.add_argument(
+        "--model-format",
+        choices=frontends.get_frontend_names(),
+        help="specify input model format",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="module.tar",
+        help="output the compiled module to an archive",
+    )
+    parser.add_argument(
+        "--target",
+        help="compilation target as plain string, inline JSON or path to a JSON file",
+        required=True
+    )
+    parser.add_argument(
+        "--tuning-records",
+        metavar="PATH",
+        default="",
+        help="path to an auto-tuning log file from AutoTVM"

Review comment:
       ```suggestion
           help="path to an auto-tuning log file by AutoTVM. If not presented, the fallback/tophub configs will be used"
   ```

##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
+@register_parser
+def add_compile_parser(subparsers):
+    """ Include parser for 'compile' subcommand """
+
+    parser = subparsers.add_parser("compile", help="compile a model")
+    parser.set_defaults(func=drive_compile)
+    parser.add_argument(
+        "--cross-compiler",
+        default="",
+        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
+    )
+    parser.add_argument(
+        "--desired-layout",
+        choices=["NCHW", "NHWC"],
+        default=None,
+        help="change the data layout of the whole graph",
+    )
+    parser.add_argument(
+        "--dump-code",
+        metavar="FORMAT",
+        default="",
+        help="comma separarated list of formats to export, e.g. 'asm,ll,relay' "
+    )
+    parser.add_argument(
+        "--model-format",
+        choices=frontends.get_frontend_names(),
+        help="specify input model format",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="module.tar",
+        help="output the compiled module to an archive",
+    )
+    parser.add_argument(
+        "--target",
+        help="compilation target as plain string, inline JSON or path to a JSON file",
+        required=True
+    )
+    parser.add_argument(
+        "--tuning-records",
+        metavar="PATH",
+        default="",
+        help="path to an auto-tuning log file from AutoTVM"
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="count", default=0, help="increase verbosity"
+    )
+    #TODO (@leandron) This is a path to a physical file, but
+    #     can be improved in future to add integration with a modelzoo
+    #     or URL, for example.
+    parser.add_argument("FILE", help="path to the input model file")
+
+
+def drive_compile(args):
+    """ Invoke tvmc.compiler module with command line arguments
+
+    Parameters
+    ----------
+    args: argparse.Namespace
+        Arguments from command line parser.
+
+    Returns
+    --------
+    int
+        Zero if successfully completed
+
+    """
+
+    graph, lib, params, dumps = compile_model(
+        args.FILE,
+        args.target,
+        args.dump_code,
+        "",
+        args.model_format,
+        args.tuning_records,
+        args.tensor_layout,
+    )
+
+    if dumps:
+        save_dumps(args.output, dumps)
+
+    save_module(args.output, graph, lib, params, args.cross_compiler)
+    return 0
+
+
+def compile_model(
+        path,
+        target,
+        dump_code=None,
+        target_host=None,
+        model_format=None,
+        tuning_records=None,
+        alter_layout=None,
+):
+    """Compile a model from a supported framework into a TVM module.
+
+    This function takes a union of the arguments of both frontends.load_model
+    and compiler.compile_relay. The resulting TVM module can be executed using
+    the graph runtime.
+
+    Parameters
+    ----------
+    path: str
+        Path to a file
+    target : str
+        The target for which to compile. Can be a plain string or
+        a path.
+    dump_code : list, optional
+        Dump the generated code for the specified source types, on
+        the requested target.
+    target_host : str, optional
+        The target of the host machine if host-side code
+        needs to be generated.
+    model_format: str, optional
+        A string representing a name of a frontend to be used
+    tuning_records: str, optional
+        Name of the file produced by the tuning to be used during
+        compilation.
+    alter_layout: str, optional
+        The layout to convert the graph to. Note, the convert layout
+        pass doesn't currently guarantee the whole of the graph will
+        be converted to the chosen layout.
+
+    Returns
+    -------
+    graph : str
+        A JSON-serialized TVM execution graph.
+    lib : tvm.module.Module
+        A TVM module containing the compiled functions.
+    params : dict
+        The parameters (weights) for the TVM module.
+    dumps : dict
+        Dictionary containing the dumps specified.
+
+    """
+    dump_code = [x.strip() for x in  dump_code.split(',')] if dump_code else None
+    mod, params = frontends.load_model(path, model_format)
+
+    if alter_layout:
+        mod = common.convert_graph_layout(mod, alter_layout)
+
+    if os.path.exists(str(target)):

Review comment:
       1. You don't need `str()` as `target` is already a string.
   2. Add comments saying this is to handle the target file in JSON format.

##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
+@register_parser
+def add_compile_parser(subparsers):
+    """ Include parser for 'compile' subcommand """
+
+    parser = subparsers.add_parser("compile", help="compile a model")
+    parser.set_defaults(func=drive_compile)
+    parser.add_argument(
+        "--cross-compiler",
+        default="",
+        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
+    )
+    parser.add_argument(
+        "--desired-layout",
+        choices=["NCHW", "NHWC"],
+        default=None,
+        help="change the data layout of the whole graph",
+    )
+    parser.add_argument(
+        "--dump-code",
+        metavar="FORMAT",
+        default="",
+        help="comma separarated list of formats to export, e.g. 'asm,ll,relay' "
+    )
+    parser.add_argument(
+        "--model-format",
+        choices=frontends.get_frontend_names(),
+        help="specify input model format",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="module.tar",
+        help="output the compiled module to an archive",
+    )
+    parser.add_argument(
+        "--target",
+        help="compilation target as plain string, inline JSON or path to a JSON file",
+        required=True
+    )
+    parser.add_argument(
+        "--tuning-records",
+        metavar="PATH",
+        default="",
+        help="path to an auto-tuning log file from AutoTVM"
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="count", default=0, help="increase verbosity"
+    )
+    #TODO (@leandron) This is a path to a physical file, but
+    #     can be improved in future to add integration with a modelzoo
+    #     or URL, for example.
+    parser.add_argument("FILE", help="path to the input model file")
+
+
+def drive_compile(args):
+    """ Invoke tvmc.compiler module with command line arguments
+
+    Parameters
+    ----------
+    args: argparse.Namespace
+        Arguments from command line parser.
+
+    Returns
+    --------
+    int
+        Zero if successfully completed
+
+    """
+
+    graph, lib, params, dumps = compile_model(
+        args.FILE,
+        args.target,
+        args.dump_code,
+        "",

Review comment:
       ```suggestion
           None,
   ```

##########
File path: tests/python/driver/tvmc/test_frontends.py
##########
@@ -0,0 +1,196 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import os
+import tarfile
+
+import pytest
+
+from tvm.ir.module import IRModule
+
+from tvm.driver import tvmc
+from tvm.driver.tvmc.common import TVMCException
+
+
+def test_get_frontend_names_is_list():
+    sut = tvmc.frontends.get_frontend_names()
+    assert type(sut) is list
+
+
+def test_get_frontends_contains_only_strings():
+    sut = tvmc.frontends.get_frontend_names()
+    assert all([type(x) is str for x in sut]) is True
+
+
+def test_get_frontend_by_name_valid():
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    sut = tvmc.frontends.get_frontend_by_name("keras")
+    assert type(sut) is tvmc.frontends.KerasFrontend
+
+
+def test_get_frontend_by_name_invalid():
+    with pytest.raises(TVMCException) as e:
+        def f():
+            tvmc.frontends.get_frontend_by_name("unsupported_thing")
+        f()
+    assert 'unrecognized frontend' in str(e.value)
+
+
+def test_guess_frontend_tflite():
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    sut = tvmc.frontends.guess_frontend("a_model.tflite")
+    assert type(sut) is tvmc.frontends.TFLiteFrontend
+
+
+def test_guess_frontend_onnx():
+    # some CI environments wont offer onnx, so skip in case it is not present
+    pytest.importorskip('onnx')
+
+    sut = tvmc.frontends.guess_frontend("a_model.onnx")
+    assert type(sut) is tvmc.frontends.OnnxFrontend
+
+
+def test_guess_frontend_pytorch():
+    # some CI environments wont offer pytorch, so skip in case it is not present
+    pytest.importorskip('torch')
+
+    sut = tvmc.frontends.guess_frontend("a_model.pth")
+    assert type(sut) is tvmc.frontends.PyTorchFrontend
+
+
+def test_guess_frontend_keras():
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    sut = tvmc.frontends.guess_frontend("a_model.h5")
+    assert type(sut) is tvmc.frontends.KerasFrontend
+
+
+def test_guess_frontend_tensorflow():
+    # some CI environments wont offer TensorFlow, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    sut = tvmc.frontends.guess_frontend("a_model.pb")
+    assert type(sut) is tvmc.frontends.TensorflowFrontend
+
+
+def test_guess_frontend_invalid():
+    with pytest.raises(TVMCException):
+        tvmc.frontends.guess_frontend("not/a/file.txt")
+
+
+def test_load_model__invalid_path__no_language():
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    with pytest.raises(FileNotFoundError):
+        tvmc.frontends.load_model("not/a/file.tflite")
+
+
+def test_load_model__invalid_path__with_language():
+    # some CI environments wont offer onnx, so skip in case it is not present
+    pytest.importorskip('onnx')
+
+    with pytest.raises(FileNotFoundError):
+        tvmc.frontends.load_model("not/a/file.txt", model_format="onnx")
+
+
+def test_load_model__tflite(tflite_mobilenet_v1_1_quant):
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    mod, params = tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    # check whether one known value is part of the params dict
+    assert '_param_1' in params.keys()
+
+
+def test_load_model__keras(keras_resnet50):
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    mod, params = tvmc.frontends.load_model(keras_resnet50)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    ## check whether one known value is part of the params dict
+    assert '_param_1' in params.keys()
+
+
+def test_load_model__onnx(onnx_resnet50):
+    # some CI environments wont offer onnx, so skip in case it is not present
+    pytest.importorskip('onnx')
+
+    mod, params = tvmc.frontends.load_model(onnx_resnet50)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    ## check whether one known value is part of the params dict
+    assert 'resnetv24_batchnorm0_gamma' in params.keys()
+
+
+def test_load_model__pb(pb_mobilenet_v1_1_quant):
+    # some CI environments wont offer TensorFlow, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    mod, params = tvmc.frontends.load_model(pb_mobilenet_v1_1_quant)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    # check whether one known value is part of the params dict
+    assert 'MobilenetV1/Conv2d_0/weights' in params.keys()
+
+
+def test_load_model___wrong_language__to_keras(tflite_mobilenet_v1_1_quant):
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    with pytest.raises(OSError):
+        tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant, model_format="keras")
+
+
+def test_load_model___wrong_language__to_tflite(keras_resnet50):
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    with pytest.raises(TVMCException) as e:
+        def f():
+            tvmc.frontends.load_model(keras_resnet50, model_format="tflite")
+        f()
+    assert 'input file not tflite' in str(e.value)
+
+
+def test_load_model___wrong_language__to_onnx(tflite_mobilenet_v1_1_quant):
+    # some CI environments wont offer onnx, so skip in case it is not present
+    pytest.importorskip('onnx')
+
+    from google.protobuf.message import DecodeError
+
+    with pytest.raises(DecodeError):
+        tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant, model_format="onnx")
+
+
+def test_load_model___wrong_language__to_pytorch(tflite_mobilenet_v1_1_quant):
+    # some CI environments wont offer pytorch, so skip in case it is not present
+    pytest.importorskip('torch')
+
+    with pytest.raises(RuntimeError) as e:
+        def f():
+            tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant, model_format="pytorch")
+        f()
+    assert 'PytorchStreamReader' in str(e.value)

Review comment:
       ditto.

##########
File path: python/tvm/driver/tvmc/frontends.py
##########
@@ -0,0 +1,417 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to parse models from different frameworks into Relay networks.
+
+Frontend classes do lazy-loading of modules on purpose, to reduce time spent on
+loading the tool.
+"""
+import logging
+import os
+import sys
+from abc import ABC
+from abc import abstractmethod
+from pathlib import Path
+
+import numpy as np
+
+from tvm import relay
+from tvm.driver.tvmc.common import TVMCException
+
+
+class Frontend(ABC):
+    """Abstract class for frontend"""

Review comment:
       Add more description to clarify this frontend, since we already have Relay frontends. It seems to me that this frontend is used to preprocess models from each framework.

##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
+@register_parser
+def add_compile_parser(subparsers):
+    """ Include parser for 'compile' subcommand """
+
+    parser = subparsers.add_parser("compile", help="compile a model")
+    parser.set_defaults(func=drive_compile)
+    parser.add_argument(
+        "--cross-compiler",
+        default="",
+        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
+    )
+    parser.add_argument(
+        "--desired-layout",
+        choices=["NCHW", "NHWC"],
+        default=None,
+        help="change the data layout of the whole graph",
+    )
+    parser.add_argument(
+        "--dump-code",
+        metavar="FORMAT",
+        default="",
+        help="comma separarated list of formats to export, e.g. 'asm,ll,relay' "
+    )
+    parser.add_argument(
+        "--model-format",
+        choices=frontends.get_frontend_names(),
+        help="specify input model format",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="module.tar",
+        help="output the compiled module to an archive",
+    )
+    parser.add_argument(
+        "--target",
+        help="compilation target as plain string, inline JSON or path to a JSON file",
+        required=True
+    )
+    parser.add_argument(
+        "--tuning-records",
+        metavar="PATH",
+        default="",
+        help="path to an auto-tuning log file from AutoTVM"
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="count", default=0, help="increase verbosity"
+    )
+    #TODO (@leandron) This is a path to a physical file, but
+    #     can be improved in future to add integration with a modelzoo
+    #     or URL, for example.
+    parser.add_argument("FILE", help="path to the input model file")
+
+
+def drive_compile(args):
+    """ Invoke tvmc.compiler module with command line arguments
+
+    Parameters
+    ----------
+    args: argparse.Namespace
+        Arguments from command line parser.
+
+    Returns
+    --------
+    int
+        Zero if successfully completed
+
+    """
+
+    graph, lib, params, dumps = compile_model(
+        args.FILE,
+        args.target,
+        args.dump_code,
+        "",
+        args.model_format,
+        args.tuning_records,
+        args.tensor_layout,
+    )
+
+    if dumps:
+        save_dumps(args.output, dumps)
+
+    save_module(args.output, graph, lib, params, args.cross_compiler)
+    return 0
+
+
+def compile_model(
+        path,
+        target,
+        dump_code=None,
+        target_host=None,
+        model_format=None,
+        tuning_records=None,
+        alter_layout=None,
+):
+    """Compile a model from a supported framework into a TVM module.
+
+    This function takes a union of the arguments of both frontends.load_model
+    and compiler.compile_relay. The resulting TVM module can be executed using
+    the graph runtime.
+
+    Parameters
+    ----------
+    path: str
+        Path to a file
+    target : str
+        The target for which to compile. Can be a plain string or
+        a path.
+    dump_code : list, optional
+        Dump the generated code for the specified source types, on
+        the requested target.
+    target_host : str, optional
+        The target of the host machine if host-side code
+        needs to be generated.
+    model_format: str, optional
+        A string representing a name of a frontend to be used
+    tuning_records: str, optional
+        Name of the file produced by the tuning to be used during
+        compilation.
+    alter_layout: str, optional
+        The layout to convert the graph to. Note, the convert layout
+        pass doesn't currently guarantee the whole of the graph will
+        be converted to the chosen layout.
+
+    Returns
+    -------
+    graph : str
+        A JSON-serialized TVM execution graph.
+    lib : tvm.module.Module
+        A TVM module containing the compiled functions.
+    params : dict
+        The parameters (weights) for the TVM module.
+    dumps : dict
+        Dictionary containing the dumps specified.
+
+    """
+    dump_code = [x.strip() for x in  dump_code.split(',')] if dump_code else None
+    mod, params = frontends.load_model(path, model_format)
+
+    if alter_layout:
+        mod = common.convert_graph_layout(mod, alter_layout)
+
+    if os.path.exists(str(target)):
+        with open(target) as target_file:
+            logging.info("using target input from file: %s", target)
+            target = "".join(target_file.readlines())
+
+    # TODO(@leandron) We don't have an API to collect a list of supported
+    #       targets yet
+    logging.debug("creating target from input: %s", target)
+    tvm_target = tvm.target.create(target)

Review comment:
       ```suggestion
       tvm_target = tvm.target.Target(target)
   ```

##########
File path: python/setup.py
##########
@@ -162,6 +162,12 @@ def get_package_data_files():
         'attrs',
         'psutil',
         'typed_ast',
+        'tensorflow==2.1.0',
+        'tflite==2.1.0',
+        'onnx==1.6.0',
+        'onnxruntime==1.0.0',
+        'torch==1.4.0',
+        'torchvision==0.5.0'

Review comment:
       I'm not familiar with the version requirements for other frameworks. @masahi, @tqchen would you mind helping to check?

##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
+@register_parser
+def add_compile_parser(subparsers):
+    """ Include parser for 'compile' subcommand """
+
+    parser = subparsers.add_parser("compile", help="compile a model")
+    parser.set_defaults(func=drive_compile)
+    parser.add_argument(
+        "--cross-compiler",
+        default="",
+        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
+    )
+    parser.add_argument(
+        "--desired-layout",
+        choices=["NCHW", "NHWC"],
+        default=None,
+        help="change the data layout of the whole graph",
+    )
+    parser.add_argument(
+        "--dump-code",
+        metavar="FORMAT",
+        default="",
+        help="comma separarated list of formats to export, e.g. 'asm,ll,relay' "
+    )
+    parser.add_argument(
+        "--model-format",
+        choices=frontends.get_frontend_names(),
+        help="specify input model format",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="module.tar",
+        help="output the compiled module to an archive",
+    )
+    parser.add_argument(
+        "--target",
+        help="compilation target as plain string, inline JSON or path to a JSON file",
+        required=True
+    )
+    parser.add_argument(
+        "--tuning-records",
+        metavar="PATH",
+        default="",
+        help="path to an auto-tuning log file from AutoTVM"
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="count", default=0, help="increase verbosity"
+    )
+    #TODO (@leandron) This is a path to a physical file, but
+    #     can be improved in future to add integration with a modelzoo
+    #     or URL, for example.
+    parser.add_argument("FILE", help="path to the input model file")
+
+
+def drive_compile(args):
+    """ Invoke tvmc.compiler module with command line arguments
+
+    Parameters
+    ----------
+    args: argparse.Namespace
+        Arguments from command line parser.
+
+    Returns
+    --------
+    int
+        Zero if successfully completed
+
+    """
+
+    graph, lib, params, dumps = compile_model(
+        args.FILE,
+        args.target,
+        args.dump_code,
+        "",
+        args.model_format,
+        args.tuning_records,
+        args.tensor_layout,
+    )
+
+    if dumps:
+        save_dumps(args.output, dumps)
+
+    save_module(args.output, graph, lib, params, args.cross_compiler)
+    return 0
+
+
+def compile_model(
+        path,
+        target,
+        dump_code=None,
+        target_host=None,
+        model_format=None,
+        tuning_records=None,
+        alter_layout=None,
+):
+    """Compile a model from a supported framework into a TVM module.
+
+    This function takes a union of the arguments of both frontends.load_model
+    and compiler.compile_relay. The resulting TVM module can be executed using
+    the graph runtime.
+
+    Parameters
+    ----------
+    path: str
+        Path to a file
+    target : str
+        The target for which to compile. Can be a plain string or
+        a path.
+    dump_code : list, optional
+        Dump the generated code for the specified source types, on
+        the requested target.
+    target_host : str, optional
+        The target of the host machine if host-side code
+        needs to be generated.
+    model_format: str, optional
+        A string representing a name of a frontend to be used
+    tuning_records: str, optional
+        Name of the file produced by the tuning to be used during

Review comment:
       Path?

##########
File path: tests/python/driver/tvmc/test_frontends.py
##########
@@ -0,0 +1,196 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import os
+import tarfile
+
+import pytest
+
+from tvm.ir.module import IRModule
+
+from tvm.driver import tvmc
+from tvm.driver.tvmc.common import TVMCException
+
+
+def test_get_frontend_names_is_list():
+    sut = tvmc.frontends.get_frontend_names()
+    assert type(sut) is list
+
+
+def test_get_frontends_contains_only_strings():
+    sut = tvmc.frontends.get_frontend_names()
+    assert all([type(x) is str for x in sut]) is True
+
+
+def test_get_frontend_by_name_valid():
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    sut = tvmc.frontends.get_frontend_by_name("keras")
+    assert type(sut) is tvmc.frontends.KerasFrontend
+
+
+def test_get_frontend_by_name_invalid():
+    with pytest.raises(TVMCException) as e:
+        def f():
+            tvmc.frontends.get_frontend_by_name("unsupported_thing")
+        f()
+    assert 'unrecognized frontend' in str(e.value)
+
+
+def test_guess_frontend_tflite():
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    sut = tvmc.frontends.guess_frontend("a_model.tflite")
+    assert type(sut) is tvmc.frontends.TFLiteFrontend
+
+
+def test_guess_frontend_onnx():
+    # some CI environments wont offer onnx, so skip in case it is not present
+    pytest.importorskip('onnx')
+
+    sut = tvmc.frontends.guess_frontend("a_model.onnx")
+    assert type(sut) is tvmc.frontends.OnnxFrontend
+
+
+def test_guess_frontend_pytorch():
+    # some CI environments wont offer pytorch, so skip in case it is not present
+    pytest.importorskip('torch')
+
+    sut = tvmc.frontends.guess_frontend("a_model.pth")
+    assert type(sut) is tvmc.frontends.PyTorchFrontend
+
+
+def test_guess_frontend_keras():
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    sut = tvmc.frontends.guess_frontend("a_model.h5")
+    assert type(sut) is tvmc.frontends.KerasFrontend
+
+
+def test_guess_frontend_tensorflow():
+    # some CI environments wont offer TensorFlow, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    sut = tvmc.frontends.guess_frontend("a_model.pb")
+    assert type(sut) is tvmc.frontends.TensorflowFrontend
+
+
+def test_guess_frontend_invalid():
+    with pytest.raises(TVMCException):
+        tvmc.frontends.guess_frontend("not/a/file.txt")
+
+
+def test_load_model__invalid_path__no_language():
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    with pytest.raises(FileNotFoundError):
+        tvmc.frontends.load_model("not/a/file.tflite")
+
+
+def test_load_model__invalid_path__with_language():
+    # some CI environments wont offer onnx, so skip in case it is not present
+    pytest.importorskip('onnx')
+
+    with pytest.raises(FileNotFoundError):
+        tvmc.frontends.load_model("not/a/file.txt", model_format="onnx")
+
+
+def test_load_model__tflite(tflite_mobilenet_v1_1_quant):
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    mod, params = tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    # check whether one known value is part of the params dict
+    assert '_param_1' in params.keys()
+
+
+def test_load_model__keras(keras_resnet50):
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    mod, params = tvmc.frontends.load_model(keras_resnet50)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    ## check whether one known value is part of the params dict
+    assert '_param_1' in params.keys()
+
+
+def test_load_model__onnx(onnx_resnet50):
+    # some CI environments wont offer onnx, so skip in case it is not present
+    pytest.importorskip('onnx')
+
+    mod, params = tvmc.frontends.load_model(onnx_resnet50)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    ## check whether one known value is part of the params dict
+    assert 'resnetv24_batchnorm0_gamma' in params.keys()
+
+
+def test_load_model__pb(pb_mobilenet_v1_1_quant):
+    # some CI environments wont offer TensorFlow, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    mod, params = tvmc.frontends.load_model(pb_mobilenet_v1_1_quant)
+    assert type(mod) is IRModule
+    assert type(params) is dict
+    # check whether one known value is part of the params dict
+    assert 'MobilenetV1/Conv2d_0/weights' in params.keys()
+
+
+def test_load_model___wrong_language__to_keras(tflite_mobilenet_v1_1_quant):
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    with pytest.raises(OSError):
+        tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant, model_format="keras")
+
+
+def test_load_model___wrong_language__to_tflite(keras_resnet50):
+    # some CI environments wont offer TFLite, so skip in case it is not present
+    pytest.importorskip('tflite')
+
+    with pytest.raises(TVMCException) as e:
+        def f():
+            tvmc.frontends.load_model(keras_resnet50, model_format="tflite")
+        f()
+    assert 'input file not tflite' in str(e.value)

Review comment:
       Ditto — the same simplification suggested above (drop the inner wrapper function and call directly inside `pytest.raises`) applies here.

##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
+@register_parser
+def add_compile_parser(subparsers):
+    """ Include parser for 'compile' subcommand """
+
+    parser = subparsers.add_parser("compile", help="compile a model")
+    parser.set_defaults(func=drive_compile)
+    parser.add_argument(
+        "--cross-compiler",
+        default="",
+        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
+    )
+    parser.add_argument(
+        "--desired-layout",
+        choices=["NCHW", "NHWC"],
+        default=None,
+        help="change the data layout of the whole graph",
+    )
+    parser.add_argument(
+        "--dump-code",
+        metavar="FORMAT",
+        default="",
+        help="comma separarated list of formats to export, e.g. 'asm,ll,relay' "
+    )
+    parser.add_argument(
+        "--model-format",
+        choices=frontends.get_frontend_names(),
+        help="specify input model format",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="module.tar",
+        help="output the compiled module to an archive",
+    )
+    parser.add_argument(
+        "--target",
+        help="compilation target as plain string, inline JSON or path to a JSON file",
+        required=True
+    )
+    parser.add_argument(
+        "--tuning-records",
+        metavar="PATH",
+        default="",
+        help="path to an auto-tuning log file from AutoTVM"
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="count", default=0, help="increase verbosity"
+    )
+    #TODO (@leandron) This is a path to a physical file, but
+    #     can be improved in future to add integration with a modelzoo
+    #     or URL, for example.
+    parser.add_argument("FILE", help="path to the input model file")
+
+
+def drive_compile(args):
+    """ Invoke tvmc.compiler module with command line arguments
+
+    Parameters
+    ----------
+    args: argparse.Namespace
+        Arguments from command line parser.
+
+    Returns
+    --------
+    int
+        Zero if successfully completed
+
+    """
+
+    graph, lib, params, dumps = compile_model(
+        args.FILE,
+        args.target,
+        args.dump_code,
+        "",
+        args.model_format,
+        args.tuning_records,
+        args.tensor_layout,
+    )
+
+    if dumps:
+        save_dumps(args.output, dumps)
+
+    save_module(args.output, graph, lib, params, args.cross_compiler)
+    return 0
+
+
+def compile_model(
+        path,
+        target,
+        dump_code=None,
+        target_host=None,
+        model_format=None,
+        tuning_records=None,
+        alter_layout=None,
+):
+    """Compile a model from a supported framework into a TVM module.
+
+    This function takes a union of the arguments of both frontends.load_model
+    and compiler.compile_relay. The resulting TVM module can be executed using
+    the graph runtime.
+
+    Parameters
+    ----------
+    path: str
+        Path to a file
+    target : str
+        The target for which to compile. Can be a plain string or
+        a path.
+    dump_code : list, optional
+        Dump the generated code for the specified source types, on
+        the requested target.
+    target_host : str, optional
+        The target of the host machine if host-side code
+        needs to be generated.
+    model_format: str, optional
+        A string representing a name of a frontend to be used
+    tuning_records: str, optional
+        Name of the file produced by the tuning to be used during
+        compilation.
+    alter_layout: str, optional
+        The layout to convert the graph to. Note, the convert layout
+        pass doesn't currently guarantee the whole of the graph will
+        be converted to the chosen layout.
+
+    Returns
+    -------
+    graph : str
+        A JSON-serialized TVM execution graph.
+    lib : tvm.module.Module
+        A TVM module containing the compiled functions.
+    params : dict
+        The parameters (weights) for the TVM module.
+    dumps : dict
+        Dictionary containing the dumps specified.
+
+    """
+    dump_code = [x.strip() for x in  dump_code.split(',')] if dump_code else None
+    mod, params = frontends.load_model(path, model_format)
+
+    if alter_layout:
+        mod = common.convert_graph_layout(mod, alter_layout)
+
+    if os.path.exists(str(target)):
+        with open(target) as target_file:
+            logging.info("using target input from file: %s", target)
+            target = "".join(target_file.readlines())
+
+    # TODO(@leandron) We don't have an API to collect a list of supported
+    #       targets yet
+    logging.debug("creating target from input: %s", target)
+    tvm_target = tvm.target.create(target)
+    target_host = target_host or ""
+
+    if tuning_records:

Review comment:
       Need `os.path.exists(tuning_records)`

##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,305 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
+@register_parser
+def add_compile_parser(subparsers):
+    """ Include parser for 'compile' subcommand """
+
+    parser = subparsers.add_parser("compile", help="compile a model")
+    parser.set_defaults(func=drive_compile)
+    parser.add_argument(
+        "--cross-compiler",
+        default="",
+        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
+    )
+    parser.add_argument(
+        "--dump-code",
+        metavar="FORMAT",
+        default="",
+        help="comma separarated list of formats to export, e.g. 'asm,ll,relay' "
+    )
+    parser.add_argument(
+        "--model-format",
+        choices=frontends.get_frontends(),
+        help="specify input model format",
+    )
+    parser.add_argument(
+        "--input-shape",
+        type=common.parse_input_shapes,
+        metavar="INPUT_SHAPE,[INPUT_SHAPE]...",
+        help="for pytorch, e.g. '(1,3,224,224)'",
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default="module.tar",
+        help="output the compiled module to an archive",
+    )
+    parser.add_argument(
+        "--target",
+        help="compilation target as plain string, inline JSON or path to a JSON file",
+        required=True
+    )
+    parser.add_argument(
+        "--tuning-records",
+        metavar="PATH",
+        default="",
+        help="path to an auto-tuning log file from AutoTVM"
+    )
+    parser.add_argument(
+        "--desired-layout",
+        choices=["NCHW", "NHWC"],
+        default=None,
+        help="change the data layout of the whole graph",
+    )
+    parser.add_argument(
+        "-v", "--verbose", action="count", default=0, help="increase verbosity"
+    )
+    parser.add_argument("FILE")
+
+
+def drive_compile(args):
+    """ Invoke tvmc.compiler module with command line arguments """
+
+    graph, lib, params, dumps = compile_model(
+        args.FILE,
+        args.target,
+        args.dump_code,
+        "",
+        args.model_format,
+        args.input_shape,
+        args.tuning_records,
+        args.tensor_layout,
+    )
+
+    if dumps:
+        save_dumps(args.output, dumps)
+
+    save_module(args.output, graph, lib, params, args.cross_compiler)
+    return 0
+
+
+def compile_model(
+        path,
+        target,
+        dump_sources=None,
+        target_host=None,
+        model_format=None,
+        shapes=None,
+        tuning_records=None,
+        alter_layout=None,
+):
+    """Compile a model from a supported framework into a TVM module.
+
+    This function takes a union of the arguments of both frontends.load_model
+    and compiler.compile_relay. The resulting TVM module can be executed using
+    the graph runtime.
+
+    Returns
+    -------
+    graph : str
+        A JSON-serialized TVM execution graph.
+    lib : tvm.module.Module
+        A TVM module containing the compiled functions.
+    params : dict
+        The parameters (weights) for the TVM module.
+    dumps : dict
+            Dictionary containing the dumps specified.
+
+    """
+    dump_sources = [x.strip() for x in  dump_sources.split(',')] if dump_sources else None
+    mod, params = frontends.load_model(path, model_format, shapes)
+
+    return compile_relay(
+        mod,
+        params,
+        target,
+        dump_sources=dump_sources,
+        target_host=target_host,
+        tuning_records=tuning_records,
+        alter_layout=alter_layout,
+    )
+
+
+def compile_relay(
+        mod,
+        params,
+        target,
+        dump_sources=None,
+        target_host=None,
+        tuning_records=None,
+        alter_layout=None,
+):
+    """Compile a relay module to a TVM module for the graph runtime.
+
+    Parameters
+    ----------
+    mod : tvm.relay.Module
+        The relay module to compile.
+    params : dict
+        The parameters (weights) for the relay module.
+    target : str
+        The target for which to compile. Can be a plain string or
+        a path.
+    dump_sources : list, optional
+        Dump the generated code for the specified source types, on
+        the requested target.
+    target_host : Union[str, tvm.target.Target], optional
+        The target of the host machine if host-side code
+        needs to be generated.
+    tuning_records: str, optional
+        Name of the file produced by the tuning to be used during
+        compilation.
+    alter_layout: str, optional
+        The layout to convert the graph to. Note, the convert layout
+        pass doesn't currently guarantee the whole of the graph will
+        be converted to the chosen layout.
+
+    Returns
+    -------
+    graph : str
+        A JSON-serialized TVM execution graph.
+    lib : tvm.module.Module
+        A TVM module containing the compiled functions.
+    params : dict
+        The parameters (weights) for the TVM module.
+    dumps : dict
+        Dictionary containing the dumps specified.
+
+    """
+
+    if alter_layout:
+        mod = common.convert_graph_layout(mod, alter_layout)
+
+    if os.path.exists(str(target)):
+        with open(target) as target_file:
+            logging.info("using target input from file: %s", target)
+            target = "".join(target_file.readlines())
+
+    # TODO: We don't have an API to collect a list of supported
+    #       targets yet. (@leandron)
+    logging.debug("creating target from input: %s", target)
+    tvm_target = tvm.target.create(target)
+    target_host = target_host or ""
+
+    if tuning_records:
+        logging.debug("tuning records file provided: %s", tuning_records)
+        with autotvm.apply_history_best(tuning_records):
+            with tvm.transform.PassContext(opt_level=3):
+                logging.debug("building relay graph with tuning records")
+                graph_module = relay.build(mod, tvm_target, params=params, target_host=tvm_target)

Review comment:
       The TODO doesn't really capture my point, though. When a graph tuner log is provided, we should use `ApplyGraphBest` instead of `ApplyHistoryBest`.

##########
File path: tests/python/driver/tvmc/test_frontends.py
##########
@@ -0,0 +1,196 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import os
+import tarfile
+
+import pytest
+
+from tvm.ir.module import IRModule
+
+from tvm.driver import tvmc
+from tvm.driver.tvmc.common import TVMCException
+
+
+def test_get_frontend_names_is_list():
+    sut = tvmc.frontends.get_frontend_names()
+    assert type(sut) is list
+
+
+def test_get_frontends_contains_only_strings():
+    sut = tvmc.frontends.get_frontend_names()
+    assert all([type(x) is str for x in sut]) is True
+
+
+def test_get_frontend_by_name_valid():
+    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
+    pytest.importorskip('tensorflow')
+
+    sut = tvmc.frontends.get_frontend_by_name("keras")
+    assert type(sut) is tvmc.frontends.KerasFrontend
+
+
+def test_get_frontend_by_name_invalid():
+    with pytest.raises(TVMCException) as e:
+        def f():
+            tvmc.frontends.get_frontend_by_name("unsupported_thing")
+        f()
+    assert 'unrecognized frontend' in str(e.value)

Review comment:
       This should be possible to simplify to the following. If it raises the exception, then the test passes, so we don't care what was printed; otherwise pytest will report an error that the expected exception was not raised.
   
   ```suggestion
           tvmc.frontends.get_frontend_by_name("unsupported_thing")
   ```

##########
File path: tests/python/driver/tvmc/test_frontends.py
##########
@@ -0,0 +1,196 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import os
+import tarfile
+
+import pytest
+
+from tvm.ir.module import IRModule
+
+from tvm.driver import tvmc
+from tvm.driver.tvmc.common import TVMCException
+
+
+def test_get_frontend_names_is_list():
+    sut = tvmc.frontends.get_frontend_names()
+    assert type(sut) is list

Review comment:
       This test seems unnecessary, since this function is already exercised by multiple other tests.

##########
File path: python/tvm/driver/tvmc/frontends.py
##########
@@ -0,0 +1,417 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to parse models from different frameworks into Relay networks.
+
+Frontend classes do lazy-loading of modules on purpose, to reduce time spent on
+loading the tool.
+"""
+import logging
+import os
+import sys
+from abc import ABC
+from abc import abstractmethod
+from pathlib import Path
+
+import numpy as np
+
+from tvm import relay
+from tvm.driver.tvmc.common import TVMCException
+
+
+class Frontend(ABC):
+    """Abstract class for frontend"""
+
+    @staticmethod
+    @abstractmethod
+    def name():
+        """Frontend name"""
+
+    @staticmethod
+    @abstractmethod
+    def suffixes():
+        """File suffixes (extensions) used by this frontend"""
+
+    @abstractmethod
+    def load(self, path):
+        """Load a model from a given path.
+
+        Parameters
+        ----------
+        path: str
+            Path to a file
+
+        Returns
+        -------
+        mod : tvm.relay.Module
+            The produced relay module.
+        params : dict
+            The parameters (weights) for the relay module.
+
+        """
+
+
+def import_keras():
+    """ Lazy import function for Keras"""
+    # Keras writes the message "Using TensorFlow backend." to stderr
+    # Redirect stderr during the import to disable this
+    stderr = sys.stderr
+    sys.stderr = open(os.devnull, "w")
+    try:
+        # pylint: disable=C0415
+        import tensorflow as tf
+        from tensorflow import keras
+
+        return tf, keras
+    finally:
+        sys.stderr = stderr
+
+
+class KerasFrontend(Frontend):
+    """ Keras frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "keras"
+
+    @staticmethod
+    def suffixes():
+        return ["h5"]
+
+    def load(self, path):
+        # pylint: disable=C0103
+        tf, keras = import_keras()
+
+        # tvm build currently imports keras directly instead of tensorflow.keras
+        try:
+            model = keras.models.load_model(path)
+        except ValueError as err:
+            raise TVMCException(str(err))
+
+        # There are two flavours of keras model, sequential and
+        # functional, TVM expects a functional model, so convert
+        # if required:
+        if self.is_sequential_p(model):
+            model = self.sequential_to_functional(model)
+
+        in_shapes = []
+        for layer in model._input_layers:
+            if tf.executing_eagerly():
+                in_shapes.append(
+                    tuple(dim if dim is not None else 1 for dim in layer.input.shape)
+                )
+            else:
+                in_shapes.append(
+                    tuple(
+                        dim.value if dim.value is not None else 1
+                        for dim in layer.input.shape
+                    )
+                )
+
+        inputs = [
+            np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes
+        ]
+        shape_dict = {name: x.shape for (name, x) in zip(model.input_names, inputs)}
+        return relay.frontend.from_keras(model, shape_dict, layout="NHWC")
+
+    def is_sequential_p(self, model):
+        _, keras = import_keras()
+        return isinstance(model, keras.models.Sequential)
+
+    def sequential_to_functional(self, model):
+        _, keras = import_keras()
+        assert self.is_sequential_p(model)
+        input_layer = keras.layers.Input(batch_shape=model.layers[0].input_shape)
+        prev_layer = input_layer
+        for layer in model.layers:
+            prev_layer = layer(prev_layer)
+        model = keras.models.Model([input_layer], [prev_layer])
+        return model
+
+
+class OnnxFrontend(Frontend):
+    """ ONNX frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "onnx"
+
+    @staticmethod
+    def suffixes():
+        return ["onnx"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import onnx
+
+        model = onnx.load(path)
+
+        # pylint: disable=E1101
+        name = model.graph.input[0].name
+
+        # pylint: disable=E1101
+        proto_shape = model.graph.input[0].type.tensor_type.shape.dim
+        shape = [d.dim_value for d in proto_shape]
+
+        shape_dict = {name: shape}
+
+        return relay.frontend.from_onnx(model, shape_dict)
+
+
+class TensorflowFrontend(Frontend):
+    """ TensorFlow frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "pb"
+
+    @staticmethod
+    def suffixes():
+        return ["pb"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import tensorflow as tf
+        import tvm.relay.testing.tf as tf_testing
+
+        with tf.io.gfile.GFile(path, "rb") as tf_graph:
+            content = tf_graph.read()
+
+        graph_def = tf.compat.v1.GraphDef()
+        graph_def.ParseFromString(content)
+        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
+
+        logging.debug("relay.frontend.from_tensorflow")
+        return relay.frontend.from_tensorflow(graph_def)
+
+
+class TFLiteFrontend(Frontend):
+    """ TFLite frontend for TVMC """
+
+    _tflite_m = {
+        0: "float32",
+        1: "float16",
+        2: "int32",
+        3: "uint8",
+        4: "int64",
+        5: "string",
+        6: "bool",
+        7: "int16",
+        8: "complex64",
+        9: "int8",
+    }
+
+    @staticmethod
+    def name():
+        return "tflite"
+
+    @staticmethod
+    def suffixes():
+        return ["tflite"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import tflite.Model as model
+
+        with open(path, "rb") as tf_graph:
+            content = tf_graph.read()
+
+        # tflite.Model.Model is tflite.Model in 1.14 and 2.1.0
+        try:
+            tflite_model = model.Model.GetRootAsModel(content, 0)
+        except AttributeError:
+            tflite_model = model.GetRootAsModel(content, 0)
+
+        try:
+            version = tflite_model.Version()
+            logging.debug("tflite version %s", version)
+        except Exception:
+            raise TVMCException("input file not tflite")
+
+        if version != 3:
+            raise TVMCException("input file not tflite version 3")
+
+        logging.debug("tflite_input_type")
+        shape_dict, dtype_dict = TFLiteFrontend._input_type(tflite_model)
+
+        # parse TFLite model and convert into Relay computation graph
+        logging.debug("relay.frontend.from_tflite")
+        mod, params = relay.frontend.from_tflite(
+            tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict
+        )
+        return mod, params
+
+    @staticmethod
+    def _decode_type(n):
+        return TFLiteFrontend._tflite_m[n]
+
+    @staticmethod
+    def _input_type(model):
+        subgraph_count = model.SubgraphsLength()
+        assert subgraph_count > 0
+        shape_dict = {}
+        dtype_dict = {}
+        for subgraph_index in range(subgraph_count):
+            subgraph = model.Subgraphs(subgraph_index)
+            inputs_count = subgraph.InputsLength()
+            assert inputs_count >= 1
+            for input_index in range(inputs_count):
+                input_ = subgraph.Inputs(input_index)
+                assert subgraph.TensorsLength() > input_
+                tensor = subgraph.Tensors(input_)
+                input_shape = tuple(tensor.ShapeAsNumpy())
+                tensor_type = tensor.Type()
+                input_name = tensor.Name().decode("utf8")
+                shape_dict[input_name] = input_shape
+                dtype_dict[input_name] = TFLiteFrontend._decode_type(tensor_type)
+
+        return shape_dict, dtype_dict
+
+
+class PyTorchFrontend(Frontend):
+    """ PyTorch frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "pytorch"
+
+    @staticmethod
+    def suffixes():
+        # Torch Script is a zip file, but can be named pth
+        return ["pth", "zip"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import torch
+
+        traced_model = torch.jit.load(path)
+
+        inputs = list(traced_model.graph.inputs())[1:]
+        input_shapes = [inp.type().sizes() for inp in inputs]
+
+        traced_model.eval()  # Switch to inference mode
+        input_shapes = [
+            ("input{}".format(idx), shape) for idx, shape in enumerate(shapes)
+        ]
+        logging.debug("relay.frontend.from_pytorch")
+        return relay.frontend.from_pytorch(traced_model, input_shapes)
+
+
+ALL_FRONTENDS = [
+    KerasFrontend,
+    OnnxFrontend,
+    TensorflowFrontend,
+    TFLiteFrontend,
+    PyTorchFrontend,
+]
+
+
+def get_frontend_names():
+    """Return the names of all supported frontends
+
+    Returns
+    -------
+    list : list of str
+        A list of frontend names as strings
+
+    """
+    return [frontend.name() for frontend in ALL_FRONTENDS]
+
+
+def get_frontend_by_name(name):
+    """
+    This function will try to get a frontend instance, based
+    on the name provided.
+
+    Parameters
+    ----------
+    name : str
+        the name of a given frontend
+
+    Returns
+    -------
+    frontend : tvm.driver.tvmc.Frontend
+        An instance of the frontend that matches with
+        the file extension provided in `path`.
+
+    """
+
+    for frontend in ALL_FRONTENDS:
+        if name == frontend.name():
+            return frontend()
+
+    raise TVMCException("unrecognized frontend")

Review comment:
       Also display the given name. It would be even better to provide a list of available frontends by calling `get_frontend_names` here.

##########
File path: python/tvm/driver/tvmc/frontends.py
##########
@@ -0,0 +1,417 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to parse models from different frameworks into Relay networks.
+
+Frontend classes do lazy-loading of modules on purpose, to reduce time spent on
+loading the tool.
+"""
+import logging
+import os
+import sys
+from abc import ABC
+from abc import abstractmethod
+from pathlib import Path
+
+import numpy as np
+
+from tvm import relay
+from tvm.driver.tvmc.common import TVMCException
+
+
+class Frontend(ABC):
+    """Abstract class for frontend"""
+
+    @staticmethod
+    @abstractmethod
+    def name():
+        """Frontend name"""
+
+    @staticmethod
+    @abstractmethod
+    def suffixes():
+        """File suffixes (extensions) used by this frontend"""
+
+    @abstractmethod
+    def load(self, path):
+        """Load a model from a given path.
+
+        Parameters
+        ----------
+        path: str
+            Path to a file
+
+        Returns
+        -------
+        mod : tvm.relay.Module
+            The produced relay module.
+        params : dict
+            The parameters (weights) for the relay module.
+
+        """
+
+
+def import_keras():
+    """ Lazy import function for Keras"""
+    # Keras writes the message "Using TensorFlow backend." to stderr
+    # Redirect stderr during the import to disable this
+    stderr = sys.stderr
+    sys.stderr = open(os.devnull, "w")
+    try:
+        # pylint: disable=C0415
+        import tensorflow as tf
+        from tensorflow import keras
+
+        return tf, keras
+    finally:
+        sys.stderr = stderr
+
+
+class KerasFrontend(Frontend):
+    """ Keras frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "keras"
+
+    @staticmethod
+    def suffixes():
+        return ["h5"]
+
+    def load(self, path):
+        # pylint: disable=C0103
+        tf, keras = import_keras()
+
+        # tvm build currently imports keras directly instead of tensorflow.keras
+        try:
+            model = keras.models.load_model(path)
+        except ValueError as err:
+            raise TVMCException(str(err))
+
+        # There are two flavours of keras model, sequential and
+        # functional, TVM expects a functional model, so convert
+        # if required:
+        if self.is_sequential_p(model):
+            model = self.sequential_to_functional(model)
+
+        in_shapes = []
+        for layer in model._input_layers:
+            if tf.executing_eagerly():
+                in_shapes.append(
+                    tuple(dim if dim is not None else 1 for dim in layer.input.shape)
+                )
+            else:
+                in_shapes.append(
+                    tuple(
+                        dim.value if dim.value is not None else 1
+                        for dim in layer.input.shape
+                    )
+                )
+
+        inputs = [
+            np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes
+        ]
+        shape_dict = {name: x.shape for (name, x) in zip(model.input_names, inputs)}
+        return relay.frontend.from_keras(model, shape_dict, layout="NHWC")
+
+    def is_sequential_p(self, model):
+        _, keras = import_keras()
+        return isinstance(model, keras.models.Sequential)
+
+    def sequential_to_functional(self, model):
+        _, keras = import_keras()
+        assert self.is_sequential_p(model)
+        input_layer = keras.layers.Input(batch_shape=model.layers[0].input_shape)
+        prev_layer = input_layer
+        for layer in model.layers:
+            prev_layer = layer(prev_layer)
+        model = keras.models.Model([input_layer], [prev_layer])
+        return model
+
+
+class OnnxFrontend(Frontend):
+    """ ONNX frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "onnx"
+
+    @staticmethod
+    def suffixes():
+        return ["onnx"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import onnx
+
+        model = onnx.load(path)
+
+        # pylint: disable=E1101
+        name = model.graph.input[0].name
+
+        # pylint: disable=E1101
+        proto_shape = model.graph.input[0].type.tensor_type.shape.dim
+        shape = [d.dim_value for d in proto_shape]
+
+        shape_dict = {name: shape}
+
+        return relay.frontend.from_onnx(model, shape_dict)
+
+
+class TensorflowFrontend(Frontend):
+    """ TensorFlow frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "pb"
+
+    @staticmethod
+    def suffixes():
+        return ["pb"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import tensorflow as tf
+        import tvm.relay.testing.tf as tf_testing
+
+        with tf.io.gfile.GFile(path, "rb") as tf_graph:
+            content = tf_graph.read()
+
+        graph_def = tf.compat.v1.GraphDef()
+        graph_def.ParseFromString(content)
+        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
+
+        logging.debug("relay.frontend.from_tensorflow")
+        return relay.frontend.from_tensorflow(graph_def)
+
+
+class TFLiteFrontend(Frontend):
+    """ TFLite frontend for TVMC """
+
+    _tflite_m = {
+        0: "float32",
+        1: "float16",
+        2: "int32",
+        3: "uint8",
+        4: "int64",
+        5: "string",
+        6: "bool",
+        7: "int16",
+        8: "complex64",
+        9: "int8",
+    }
+
+    @staticmethod
+    def name():
+        return "tflite"
+
+    @staticmethod
+    def suffixes():
+        return ["tflite"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import tflite.Model as model
+
+        with open(path, "rb") as tf_graph:
+            content = tf_graph.read()
+
+        # tflite.Model.Model is tflite.Model in 1.14 and 2.1.0
+        try:
+            tflite_model = model.Model.GetRootAsModel(content, 0)
+        except AttributeError:
+            tflite_model = model.GetRootAsModel(content, 0)
+
+        try:
+            version = tflite_model.Version()
+            logging.debug("tflite version %s", version)
+        except Exception:
+            raise TVMCException("input file not tflite")
+
+        if version != 3:
+            raise TVMCException("input file not tflite version 3")
+
+        logging.debug("tflite_input_type")
+        shape_dict, dtype_dict = TFLiteFrontend._input_type(tflite_model)
+
+        # parse TFLite model and convert into Relay computation graph
+        logging.debug("relay.frontend.from_tflite")
+        mod, params = relay.frontend.from_tflite(
+            tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict
+        )
+        return mod, params
+
+    @staticmethod
+    def _decode_type(n):
+        return TFLiteFrontend._tflite_m[n]
+
+    @staticmethod
+    def _input_type(model):
+        subgraph_count = model.SubgraphsLength()
+        assert subgraph_count > 0
+        shape_dict = {}
+        dtype_dict = {}
+        for subgraph_index in range(subgraph_count):
+            subgraph = model.Subgraphs(subgraph_index)
+            inputs_count = subgraph.InputsLength()
+            assert inputs_count >= 1
+            for input_index in range(inputs_count):
+                input_ = subgraph.Inputs(input_index)
+                assert subgraph.TensorsLength() > input_
+                tensor = subgraph.Tensors(input_)
+                input_shape = tuple(tensor.ShapeAsNumpy())
+                tensor_type = tensor.Type()
+                input_name = tensor.Name().decode("utf8")
+                shape_dict[input_name] = input_shape
+                dtype_dict[input_name] = TFLiteFrontend._decode_type(tensor_type)
+
+        return shape_dict, dtype_dict
+
+
+class PyTorchFrontend(Frontend):
+    """ PyTorch frontend for TVMC """
+
+    @staticmethod
+    def name():
+        return "pytorch"
+
+    @staticmethod
+    def suffixes():
+        # Torch Script is a zip file, but can be named pth
+        return ["pth", "zip"]
+
+    def load(self, path):
+        # pylint: disable=C0415
+        import torch
+
+        traced_model = torch.jit.load(path)
+
+        inputs = list(traced_model.graph.inputs())[1:]
+        input_shapes = [inp.type().sizes() for inp in inputs]
+
+        traced_model.eval()  # Switch to inference mode
+        input_shapes = [
+            ("input{}".format(idx), shape) for idx, shape in enumerate(input_shapes)
+        ]
+        logging.debug("relay.frontend.from_pytorch")
+        return relay.frontend.from_pytorch(traced_model, input_shapes)
+
+
+ALL_FRONTENDS = [
+    KerasFrontend,
+    OnnxFrontend,
+    TensorflowFrontend,
+    TFLiteFrontend,
+    PyTorchFrontend,
+]
+
+
+def get_frontend_names():
+    """Return the names of all supported frontends
+
+    Returns
+    -------
+    list : list of str
+        A list of frontend names as strings
+
+    """
+    return [frontend.name() for frontend in ALL_FRONTENDS]
+
+
+def get_frontend_by_name(name):
+    """
+    This function will try to get a frontend instance, based
+    on the name provided.
+
+    Parameters
+    ----------
+    name : str
+        the name of a given frontend
+
+    Returns
+    -------
+    frontend : tvm.driver.tvmc.Frontend
+        An instance of the frontend whose name matches
+        the given `name` argument.
+
+    """
+
+    for frontend in ALL_FRONTENDS:
+        if name == frontend.name():
+            return frontend()
+
+    raise TVMCException("unrecognized frontend")
+
+
+def guess_frontend(path):
+    """
+    This function will try to imply which framework is being used,
+    based on the extension of the file provided in the path parameter.
+
+    Parameters
+    ----------
+    path : str
+        The path to the model file.
+
+    Returns
+    -------
+    frontend : tvm.driver.tvmc.Frontend
+        An instance of the frontend that matches with
+        the file extension provided in `path`.
+
+    """
+
+    suffix = Path(path).suffix.lower()
+    if suffix.startswith("."):
+        suffix = suffix[1:]
+
+    for frontend in ALL_FRONTENDS:
+        if suffix in frontend.suffixes():
+            return frontend()
+
+    raise TVMCException("cannot guess model format")

Review comment:
       ```suggestion
       raise TVMCException("failed to infer the model format. Please specify --model-format")
       ```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org