Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2020/12/15 16:27:32 UTC

[GitHub] [tvm] tkonolige commented on a change in pull request #7107: [Tutorial] Add output validation to sparse tutorial

tkonolige commented on a change in pull request #7107:
URL: https://github.com/apache/tvm/pull/7107#discussion_r543488900



##########
File path: tutorials/frontend/deploy_sparse.py
##########
@@ -210,29 +211,28 @@ def import_graphdef(
 # the weights are sparse, we won't see any speedup because we are using
 # regular dense matrix multiplications on these dense (but mostly zero)
 # tensors instead of sparse aware kernels.
-def run_relay_graph(mod, params, shape_dict, target, ctx):
+def run_relay_graph(mod, params, shape_dict, target, ctx, input):
     with relay.build_config(opt_level=3):
         lib = relay.build(mod, target=target, params=params)
-    input_shape = shape_dict["input_1"]
-    dummy_data = np.random.uniform(size=input_shape, low=0, high=input_shape[1]).astype("int32")
 
     m = graph_runtime.GraphModule(lib["default"](ctx))
-    m.set_input(0, dummy_data)
+    m.set_input(0, input)
     m.run()
     tvm_output = m.get_output(0)
 
-    ftimer = m.module.time_evaluator("run", ctx, repeat=5, number=5)
-    prof_res = np.array(ftimer().results) * 1000
-    print(
-        "%-20s %-19s (%s)"
-        % ("Runtime:", "%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res))
-    )
-    return tvm_output
+    if benchmark:
+        ftimer = m.module.time_evaluator("run", ctx, repeat=5, number=5)
+        prof_res = np.array(ftimer().results) * 1000
+        print(
+            "%-20s %-19s (%s)"
+            % ("Runtime:", "Avg-%.2f ms" % np.mean(prof_res), "Std-%.2f ms" % np.std(prof_res))
+        )
+    return tvm_output.asnumpy()
 
 
-def run_dense(mod, params, shape_dict, target, ctx):
-    print("Dense Model Benchmark:")
-    return run_relay_graph(mod, params, shape_dict, target, ctx)
+def run_dense(mod, params, shape_dict, target, ctx, input):
+    print("Dense Model Inference begins.")

Review comment:
       How about
   ```suggestion
       print("Starting inference for dense model.")
   ```
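For context, the hunk above moves dummy-input generation out of run_relay_graph and into the caller. A minimal sketch of what the caller side might now look like, reconstructed from the removed lines (the variable name `dummy_input` is illustrative, not from the diff):

```python
import numpy as np

# Reconstructed from the lines removed above: random int32 token ids
# with the shape registered for "input_1" in shape_dict.
input_shape = shape_dict["input_1"]
dummy_input = np.random.uniform(size=input_shape, low=0, high=input_shape[1]).astype("int32")

# The new signatures thread this input through to run_relay_graph.
dense_output = run_dense(mod, params, shape_dict, target, ctx, dummy_input)
```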

##########
File path: tutorials/frontend/deploy_sparse.py
##########
@@ -297,13 +297,13 @@ def deepcopy(param_dic):
     return new_params
 
 
-def run_sparse(mod, params, shape_dict, target, ctx, bs_r, sparsity, gen_weights):
+def run_sparse(mod, params, shape_dict, target, ctx, bs_r, sparsity, gen_weights, input):
     mod, params = ddo.simplify_fc_transpose.convert(mod["main"], params)
     if gen_weights:
         params = random_sparse_bert_params(mod, params, BS_R=bs_r, BS_C=1, density=1 - sparsity)
     mod, params = ddo.bsr_dense.convert(mod, params, (bs_r, 1), sparsity_threshold=0.8)
-    print("Block Sparse Model with {blocksize}x1 blocks:".format(blocksize=bs_r))
-    return run_relay_graph(mod, params, shape_dict, target, ctx)
+    print("Block Sparse Model with {blocksize}x1 blocks Inference begins.".format(blocksize=bs_r))

Review comment:
       How about
   ```suggestion
       print("Starting inference for block sparse model with {blocksize}x1 blocks.".format(blocksize=bs_r))
   ```
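For background on the bs_r x 1 blocking in the hunk above: a block-sparse matrix in BSR form stores only the nonzero R x C blocks, which is what lets sparse-aware kernels skip work. A self-contained sketch with scipy (the shapes and the 0.85 block-level sparsity are illustrative, not the tutorial's actual BERT weights):

```python
import numpy as np
import scipy.sparse as sp

# Illustrative only: a (64, 64) weight matrix that is ~85% zero, with
# nonzeros grouped into 16x1 blocks as in a bs_r=16, bs_c=1 conversion.
bs_r, bs_c = 16, 1
dense = np.random.rand(64, 64).astype("float32")
# Zero out ~85% of the 16x1 blocks via a block-level mask.
mask = np.random.rand(64 // bs_r, 64 // bs_c) >= 0.85
dense *= np.repeat(np.repeat(mask, bs_r, axis=0), bs_c, axis=1)
bsr = sp.bsr_matrix(dense, blocksize=(bs_r, bs_c))
print(bsr.data.shape)  # (num_nonzero_blocks, 16, 1): only nonzero blocks are stored
```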

##########
File path: tutorials/frontend/deploy_sparse.py
##########
@@ -312,15 +312,22 @@ def run_sparse(mod, params, shape_dict, target, ctx, bs_r, sparsity, gen_weights
 # And that's it! Now we'll simply call all the needed function to benchmark

Review comment:
       Could you update this comment, please?

##########
File path: tutorials/frontend/deploy_sparse.py
##########
@@ -119,6 +117,9 @@
 # determines how sparse the generated weights should be. The higher
 # the sparsity, the faster the result.
 sparsity = 0.85
+# Running benchmarking mode might overload CI,
+# so it is disabled by default.
+benchmark = False

Review comment:
       Could you say something like "Switch this to true to run the benchmark yourself"?
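Since the PR's stated goal is output validation, the tutorial presumably ends by comparing the two runs. A hedged sketch of such a check (the tolerance and the `dummy_input` variable are assumptions, following the signatures in the hunks above):

```python
import numpy as np

# Hypothetical validation step: run both models on the same input
# and compare their outputs.
dense_output = run_dense(mod, params, shape_dict, target, ctx, dummy_input)
sparse_output = run_sparse(
    mod, params, shape_dict, target, ctx, bs_r, sparsity, gen_weights, dummy_input
)
# Only expected to pass when the sparse run reuses the dense weights
# rather than regenerating random ones (gen_weights=False); the
# tolerance is illustrative.
np.testing.assert_allclose(dense_output, sparse_output, rtol=1e-4, atol=1e-4)
```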



