You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2021/01/27 21:16:13 UTC

[GitHub] [tvm] trevor-m commented on a change in pull request #7162: Fix Segmentation Fault For Tensorrt BYOC when TVM_TENSORRT_CACHE_DIR is Set

trevor-m commented on a change in pull request #7162:
URL: https://github.com/apache/tvm/pull/7162#discussion_r565629078



##########
File path: src/runtime/contrib/tensorrt/tensorrt_ops.cc
##########
@@ -921,7 +921,7 @@ class ReshapeOpConverter : public TensorRTOpConverter {
 
   void Convert(TensorRTOpConverterParams* params) const {
     auto input = params->inputs.at(0).tensor;
-    ICHECK_EQ(std::stoi(params->node.GetAttr<std::vector<std::string>>("reverse")[0]), false);
+    //ICHECK_EQ(std::stoi(params->node.GetAttr<std::vector<std::string>>("reverse")[0]), false);

Review comment:
       Please rebase so you don't need to comment out this line.

##########
File path: src/runtime/contrib/tensorrt/tensorrt_runtime.cc
##########
@@ -83,8 +83,8 @@ class TensorRTRuntime : public JSONRuntimeBase {
     ICHECK_EQ(consts.size(), const_idx_.size())
         << "The number of input constants must match the number of required.";
     LoadGlobalAttributes();
-    if (GetCachedEnginesFromDisk()) return;
     SetupConstants(consts);
+    if (GetCachedEnginesFromDisk()) return;

Review comment:
      Since `GetCachedEnginesFromDisk` is now at the end of the function, we don't need the `if` and `return`.

##########
File path: src/runtime/contrib/tensorrt/tensorrt_runtime.cc
##########
@@ -178,7 +178,14 @@ class TensorRTRuntime : public JSONRuntimeBase {
    */
   void BuildEngine() {
     batch_size_ = data_entry_[input_var_eid_[0]]->shape[0];
-    if (trt_engine_cache_.count(std::make_pair(symbol_name_, batch_size_))) return;
+    if (trt_engine_cache_.count(std::make_pair(symbol_name_, batch_size_))) {
+      TensorRTEngineAndContext& engine_and_context =
+          trt_engine_cache_.at(std::make_pair(symbol_name_, batch_size_));
+      size_t binding_num = engine_and_context.engine->getNbBindings();
+      if (engine_and_context.device_buffers.size() == binding_num) {

Review comment:
      This could be `!engine_and_context.device_buffers.empty()` instead, which may communicate the purpose of this check better.

##########
File path: src/runtime/contrib/tensorrt/tensorrt_builder.cc
##########
@@ -185,6 +185,17 @@ TensorRTEngineAndContext TensorRTBuilder::BuildEngine() {
   return {engine, context, network_input_names_, network_output_names_, device_buffers};
 }
 
+void TensorRTBuilder::CreateDeviceBuffers(TensorRTEngineAndContext* engine_and_context) {

Review comment:
       The code in this function is a duplicate of the code in `BuildEngine()` - can you call this new function from BuildEngine to avoid the duplication?

##########
File path: src/runtime/contrib/tensorrt/tensorrt_runtime.cc
##########
@@ -211,6 +218,16 @@ class TensorRTRuntime : public JSONRuntimeBase {
       builder.AddOutput(outputs_[i], EntryID(outputs_[i]));
     }
 
+    // Allocate Device Buffers
+    if (trt_engine_cache_.count(std::make_pair(symbol_name_, batch_size_))) {
+      TensorRTEngineAndContext& engine_and_context =
+          trt_engine_cache_.at(std::make_pair(symbol_name_, batch_size_));
+      if (engine_and_context.device_buffers.size() == 0) {
+        builder.CreateDeviceBuffers(&engine_and_context);
+        return;

Review comment:
      We also shouldn't have to rebuild the whole network just to allocate the buffers.

##########
File path: src/runtime/contrib/tensorrt/tensorrt_runtime.cc
##########
@@ -211,6 +218,16 @@ class TensorRTRuntime : public JSONRuntimeBase {
       builder.AddOutput(outputs_[i], EntryID(outputs_[i]));
     }
 
+    // Allocate Device Buffers
+    if (trt_engine_cache_.count(std::make_pair(symbol_name_, batch_size_))) {
+      TensorRTEngineAndContext& engine_and_context =
+          trt_engine_cache_.at(std::make_pair(symbol_name_, batch_size_));
+      if (engine_and_context.device_buffers.size() == 0) {
+        builder.CreateDeviceBuffers(&engine_and_context);
+        return;

Review comment:
      We are building the TRT network in the TensorRTBuilder, but exiting before `BuildEngine` is called. This means the resources used by `builder` won't ever be freed (`TensorRTBuilder::CleanUp()` needs to be called).




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org