You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/09/03 09:17:54 UTC

[GitHub] [incubator-tvm] leandron commented on a change in pull request #6302: [tvmc] command line driver 'compile' (part 2/4)

leandron commented on a change in pull request #6302:
URL: https://github.com/apache/incubator-tvm/pull/6302#discussion_r482832429



##########
File path: python/tvm/driver/tvmc/compiler.py
##########
@@ -0,0 +1,305 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""
+Provides support to compile networks both AOT and JIT.
+"""
+import logging
+import os.path
+import tarfile
+from pathlib import Path
+
+import tvm
+from tvm import autotvm
+from tvm import relay
+from tvm.contrib import cc
+from tvm.contrib import util
+
+from . import common, frontends
+from .main import register_parser
+
+
@register_parser
def add_compile_parser(subparsers):
    """Include parser for the 'compile' subcommand.

    Parameters
    ----------
    subparsers : argparse._SubParsersAction
        The subparser collection to which the 'compile' parser is added.
    """

    parser = subparsers.add_parser("compile", help="compile a model")
    # drive_compile is invoked when the 'compile' subcommand is selected.
    parser.set_defaults(func=drive_compile)
    parser.add_argument(
        "--cross-compiler",
        default="",
        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'",
    )
    parser.add_argument(
        "--dump-code",
        metavar="FORMAT",
        default="",
        # fixed typo: "separarated" -> "separated"
        help="comma separated list of formats to export, e.g. 'asm,ll,relay'",
    )
    parser.add_argument(
        "--model-format",
        choices=frontends.get_frontends(),
        help="specify input model format",
    )
    parser.add_argument(
        "--input-shape",
        type=common.parse_input_shapes,
        metavar="INPUT_SHAPE,[INPUT_SHAPE]...",
        help="for pytorch, e.g. '(1,3,224,224)'",
    )
    parser.add_argument(
        "-o",
        "--output",
        default="module.tar",
        help="output the compiled module to an archive",
    )
    parser.add_argument(
        "--target",
        help="compilation target as plain string, inline JSON or path to a JSON file",
        required=True,
    )
    parser.add_argument(
        "--tuning-records",
        metavar="PATH",
        default="",
        help="path to an auto-tuning log file from AutoTVM",
    )
    parser.add_argument(
        "--desired-layout",
        choices=["NCHW", "NHWC"],
        default=None,
        help="change the data layout of the whole graph",
    )
    parser.add_argument(
        "-v", "--verbose", action="count", default=0, help="increase verbosity"
    )
    # Positional argument: path to the input model file.
    parser.add_argument("FILE")
+
+
def drive_compile(args):
    """Invoke tvmc.compiler module with command line arguments.

    Parameters
    ----------
    args : argparse.Namespace
        Arguments from the command line parser, as produced by
        add_compile_parser.

    Returns
    -------
    int
        Zero on success.
    """

    graph, lib, params, dumps = compile_model(
        args.FILE,
        args.target,
        args.dump_code,
        "",  # target_host: no host-side target given on the command line yet
        args.model_format,
        args.input_shape,
        args.tuning_records,
        # BUGFIX: the parser defines --desired-layout (argparse dest
        # "desired_layout"); the previous "args.tensor_layout" raised
        # AttributeError at runtime.
        args.desired_layout,
    )

    if dumps:
        save_dumps(args.output, dumps)

    save_module(args.output, graph, lib, params, args.cross_compiler)
    return 0
+
+
def compile_model(
        path,
        target,
        dump_sources=None,
        target_host=None,
        model_format=None,
        shapes=None,
        tuning_records=None,
        alter_layout=None,
):
    """Compile a model from a supported framework into a TVM module.

    This function takes a union of the arguments of both frontends.load_model
    and compiler.compile_relay. The resulting TVM module can be executed using
    the graph runtime.

    Returns
    -------
    graph : str
        A JSON-serialized TVM execution graph.
    lib : tvm.module.Module
        A TVM module containing the compiled functions.
    params : dict
        The parameters (weights) for the TVM module.
    dumps : dict
        Dictionary containing the dumps specified.

    """
    # Turn the comma-separated format list (e.g. "asm,ll") into a clean list;
    # an empty/None value means no dumps were requested.
    if dump_sources:
        dump_sources = [fmt.strip() for fmt in dump_sources.split(",")]
    else:
        dump_sources = None

    # Load the model through the appropriate frontend, then hand over to the
    # relay compilation step.
    mod, params = frontends.load_model(path, model_format, shapes)

    return compile_relay(
        mod,
        params,
        target,
        dump_sources=dump_sources,
        target_host=target_host,
        tuning_records=tuning_records,
        alter_layout=alter_layout,
    )
+
+
def compile_relay(
        mod,
        params,
        target,
        dump_sources=None,
        target_host=None,
        tuning_records=None,
        alter_layout=None,
):
    """Compile a relay module to a TVM module for the graph runtime.

    Parameters
    ----------
    mod : tvm.relay.Module
        The relay module to compile.
    params : dict
        The parameters (weights) for the relay module.
    target : str
        The target for which to compile. Can be a plain string or
        a path.
    dump_sources : list, optional
        Dump the generated code for the specified source types, on
        the requested target.
    target_host : Union[str, tvm.target.Target], optional
        The target of the host machine if host-side code
        needs to be generated.
    tuning_records: str, optional
        Name of the file produced by the tuning to be used during
        compilation.
    alter_layout: str, optional
        The layout to convert the graph to. Note, the convert layout
        pass doesn't currently guarantee the whole of the graph will
        be converted to the chosen layout.

    Returns
    -------
    graph : str
        A JSON-serialized TVM execution graph.
    lib : tvm.module.Module
        A TVM module containing the compiled functions.
    params : dict
        The parameters (weights) for the TVM module.
    dumps : dict
        Dictionary containing the dumps specified.

    """

    # Optionally convert the graph layout (NCHW/NHWC) before compilation.
    if alter_layout:
        mod = common.convert_graph_layout(mod, alter_layout)

    # `target` may be a path to a file holding the target description;
    # in that case the file content replaces the string.
    if os.path.exists(str(target)):
        with open(target) as target_file:
            logging.info("using target input from file: %s", target)
            target = "".join(target_file.readlines())

    # TODO: We don't have an API to collect a list of supported
    #       targets yet. (@leandron)
    logging.debug("creating target from input: %s", target)
    tvm_target = tvm.target.create(target)
    target_host = target_host or ""

    if tuning_records:
        logging.debug("tuning records file provided: %s", tuning_records)
        # Apply the best schedules recorded during auto-tuning while building.
        with autotvm.apply_history_best(tuning_records):
            with tvm.transform.PassContext(opt_level=3):
                logging.debug("building relay graph with tuning records")
                # NOTE(review): target_host=tvm_target passes the device target
                # as the host target; presumably this should be the
                # `target_host` variable computed above — confirm.
                graph_module = relay.build(mod, tvm_target, params=params, target_host=tvm_target)

Review comment:
       It will be introduced in the next PR, related to the `tune` subcommand, in parallel with the `run` PR.
   
   I added a TODO.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org