Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/04/23 17:15:26 UTC

[GitHub] [tvm] areusch commented on a change in pull request #7903: [Graph Executor Debugger] Fix parameter dump

areusch commented on a change in pull request #7903:
URL: https://github.com/apache/tvm/pull/7903#discussion_r619374951



##########
File path: src/runtime/graph_executor/debug/graph_executor_debug.cc
##########
@@ -282,17 +324,17 @@ class GraphExecutorDebug : public GraphExecutor {
 PackedFunc GraphExecutorDebug::GetFunction(const std::string& name,
                                            const ObjectPtr<Object>& sptr_to_self) {
   // return member functions during query.
-  if (name == "get_output_by_layer") {
+  if (name == "debug_get_node_output") {
     return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-      *rv = this->GetOutputByLayer(args[0], args[1]);
+      *rv = this->DebugGetNodeOutput(args[0]);
     });
-  } else if (name == "debug_get_output") {
+  } else if (name == "exectue_next_node_get_output") {

Review comment:
       nit: execute

##########
File path: src/runtime/graph_executor/debug/graph_executor_debug.cc
##########
@@ -208,25 +201,74 @@ class GraphExecutorDebug : public GraphExecutor {
   }
 
   /*!
-   * \brief Copy index-th node to data_out.
+   * \brief Get number of outputs of the node.
+   * \param index The index of the node.
+   * \return The number of outputs.
+   */
+  size_t NodeGetNumOutputs(size_t index) {
+    if (nodes_[index].op_type != "tvm_op") return 1;
+    return static_cast<size_t>(nodes_[index].param.num_outputs);
+  }
+
+  /*!
+   * \brief Execute the network up to a specific node and return its output.
    *
   * This method will do a partial run of the graph
   * from the beginning up to the index-th node and return the output of the index-th node.
   * This is a costly operation, suggested for debugging purposes only.
    *
-   * \param index: The  index of the node.
-   * \param data_out the node data.
+   * \param index: The index of the node.
+   * \return Node output array.
    */
-  void DebugGetNodeOutput(int index, DLTensor* data_out) {
+  Array<NDArray> DebugGetNodeOutput(int index) {
     ICHECK_LT(static_cast<size_t>(index), op_execs_.size());
-    uint32_t eid = index;
+    Array<NDArray> results;
 
-    for (size_t i = 0; i < op_execs_.size(); ++i) {
+    for (size_t i = 0; i <= static_cast<size_t>(index); i++) {
       if (op_execs_[i]) op_execs_[i]();
-      if (static_cast<int>(i) == index) break;
     }
+    for (size_t j = 0; j < NodeGetNumOutputs(index); j++) {
+      results.push_back(data_entry_[entry_id(index, j)].CopyTo({kDLCPU, 0}));
+    }
+    return results;
+  }
+
+  /*!
+   * \brief Execute next node in the network.
+   *
+   * This method will execute the next node, assuming all
+   * previous nodes have been executed, and return the output of the index-th node.
+   *
+   * \param index: The index of the node.
+   * \param eid The Entry id of the op.

Review comment:
       this isn't the entry_id, it's the index of the output. i think it'd be great to separate execute from get-output for nodes which output tuples
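
One way the suggested separation could look on the Python wrapper side (a hypothetical sketch; `execute_node` and `get_node_output` are illustrative names, not PackedFuncs the runtime exposes in this PR):

```python
# Hypothetical split of "execute" from "get-output", so a tuple-producing
# node has each output fetched explicitly. The two callables stand in for
# runtime hooks that do not exist in this PR; names are assumptions.


class PerNodeRunner:
    def __init__(self, execute_node, get_node_output):
        # execute_node(node_index): run one node, assuming all earlier
        # nodes have already been executed.
        # get_node_output(node_index, output_index): copy one output;
        # output_index counts the node's outputs and is NOT an entry_id.
        self._execute_node = execute_node
        self._get_node_output = get_node_output

    def run_node(self, node_index, num_outputs):
        self._execute_node(node_index)
        return [self._get_node_output(node_index, j) for j in range(num_outputs)]
```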

##########
File path: python/tvm/contrib/debugger/debug_executor.py
##########
@@ -170,19 +173,44 @@ def _create_debug_env(self, graph_json, device):
         # init the debug dumping environment
         self.debug_datum = debug_result.DebugResult(graph_json, self._dump_path)
 
+    def _execute_next_node(self, node_index, entry_id):
+        """Execute node assuming all previous nodes has been executed.
+        Return the output of this node.
+
+        Parameters
+        ----------
+        node_index : int
+            The node index
+
+        entry_id : int
+            The entry id.
+        """
+        out_tensor = self._execute_next_node_get_output(node_index, entry_id)
+        return array(out_tensor)
+
+    def _run_per_layer(self):
+        """Execute up to each node and each debug output will be
+        copied to the buffer.
+
+        """
+        for i, node in enumerate(self.debug_datum.get_graph_nodes()):
+            num_outputs = self.debug_datum.get_graph_node_output_num(node)
+            for j in range(num_outputs):
+                logging.info("running: output=%d of node_name: %s", j, node["name"])
+                out_tensor = self._execute_next_node(i, j)
+                self.debug_datum._output_tensor_list.append(out_tensor)

Review comment:
       could you make this a function on debug_datum? I don't like us appending to private class members here.
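
One possible shape for that refactor (a sketch only; `record_output_tensor` is an invented method name, not part of `debug_result.DebugResult` today):

```python
# Sketch: let DebugResult own its tensor list behind a small public method,
# so the executor never appends to a private member directly.


class DebugResultSketch:
    def __init__(self):
        self._output_tensor_list = []

    def record_output_tensor(self, tensor):
        """Record one per-layer output tensor."""
        self._output_tensor_list.append(tensor)


# The loop in _run_per_layer would then end with:
#     self.debug_datum.record_output_tensor(out_tensor)
```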

##########
File path: python/tvm/contrib/debugger/debug_executor.py
##########
@@ -231,6 +258,22 @@ def run(self, **input_dict):
         # Step 4. Display the collected information
         self.debug_datum.display_debug_result()
 
+    def get_per_layer_outputs(self, **input_dict):

Review comment:
       can we just fit the modifications into run()?
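
A minimal sketch of what folding the per-layer collection into run() might look like (the collaborators are stubbed so the control flow is runnable; the real method does more, per the diff above):

```python
# Sketch: per-layer collection inside run() itself, removing the need for a
# separate get_per_layer_outputs() entry point. All bodies here are stubs.


class DebugDatumStub:
    def __init__(self):
        self.output_tensors = []

    def display_debug_result(self):
        print("collected %d tensors" % len(self.output_tensors))


class ExecutorSketch:
    def __init__(self):
        self.debug_datum = DebugDatumStub()

    def set_input(self, **input_dict):
        pass  # stub: bind the named inputs to the graph

    def _run_per_layer(self):
        # stub: execute node by node, recording each debug output
        self.debug_datum.output_tensors.append("out")

    def run(self, **input_dict):
        if input_dict:
            self.set_input(**input_dict)
        # Execute per layer and collect outputs in a single pass.
        self._run_per_layer()
        # Step 4. Display the collected information.
        self.debug_datum.display_debug_result()
```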

##########
File path: python/tvm/contrib/debugger/debug_executor.py
##########
@@ -196,20 +224,19 @@ def debug_get_output(self, node, out=None):
             The output array container
         """
         if isinstance(node, str):
-            output_tensors = self.debug_datum.get_output_tensors()
-            try:
-                out = output_tensors[node]
-            except KeyError:
-                node_list = output_tensors.keys()
-                raise RuntimeError(
-                    "Node " + node + " not found, available nodes are: " + str(node_list) + "."
-                )
+            node_index = None
+            for i, graph_node in enumerate(self.debug_datum.get_graph_nodes()):
+                if graph_node["name"] == node:
+                    node_index = i
+                    break
+            if not node_index:

Review comment:
       you can use `else:` here instead (matches for-loop, not the inner if).
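
For reference, the for/else form being suggested, as a standalone sketch: the `else:` clause runs only when the loop finishes without hitting `break`, which also sidesteps the `if not node_index:` pitfall above, where a legitimate match at index 0 is falsy and would be misreported as not found.

```python
# for/else lookup: `else` fires only if no `break` occurred, so a match at
# index 0 is handled correctly (unlike the truthiness test in the diff).

def find_node_index(graph_nodes, node):
    for i, graph_node in enumerate(graph_nodes):
        if graph_node["name"] == node:
            node_index = i
            break
    else:
        node_list = [n["name"] for n in graph_nodes]
        raise RuntimeError(
            "Node " + node + " not found, available nodes are: " + str(node_list) + "."
        )
    return node_index


# find_node_index([{"name": "conv0"}, {"name": "relu0"}], "conv0") -> 0
```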




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org