Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2017/12/13 01:07:04 UTC

[GitHub] rahul003 closed pull request #9037: Fix memory leaks and some other fixes identified by coverity

rahul003 closed pull request #9037: Fix memory leaks and some other fixes identified by coverity
URL: https://github.com/apache/incubator-mxnet/pull/9037
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/cpp-package/example/charRNN.cpp b/cpp-package/example/charRNN.cpp
index 218d11efc9..c0239f9694 100644
--- a/cpp-package/example/charRNN.cpp
+++ b/cpp-package/example/charRNN.cpp
@@ -501,6 +501,7 @@ void train(const string file, int batch_size, int max_epoch, int start_epoch) {
     string filepath = prefix + "-" + to_string(epoch) + ".params";
     SaveCheckpoint(filepath, RNN, exe);
   }
+  delete exe;
 }
 
 /*The original example, rnn_cell_demo.py, uses default Xavier as initializer, which relies on
@@ -580,6 +581,7 @@ void trainWithBuiltInRNNOp(const string file, int batch_size, int max_epoch, int
     string filepath = prefix + "-" + to_string(epoch) + ".params";
     SaveCheckpoint(filepath, RNN, exe);
   }
+  delete exe;
 }
 
 void predict(wstring* ptext, int sequence_length, const string param_file,
@@ -642,6 +644,7 @@ void predict(wstring* ptext, int sequence_length, const string param_file,
     next = charIndices[n];
     ptext->push_back(next);
   }
+  delete exe;
 }
 
 void predictWithBuiltInRNNOp(wstring* ptext, int sequence_length, const string param_file,
@@ -696,6 +699,7 @@ void predictWithBuiltInRNNOp(wstring* ptext, int sequence_length, const string p
     next = charIndices[n];
     ptext->push_back(next);
   }
+  delete exe;
 }
 
 int main(int argc, char** argv) {
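
Each of the four functions patched above holds a raw Executor pointer that was never freed before the function returned; the added "delete exe;" lines release it on the normal exit path. Below is a minimal, self-contained sketch of the leak pattern and an RAII alternative. The Executor and SimpleBind names here are illustrative stand-ins, not the actual mxnet::cpp API.

    #include <iostream>
    #include <memory>

    struct Executor {                                  // stand-in for the bound executor
      ~Executor() { std::cout << "executor released\n"; }
      void Forward() { /* run one step */ }
    };

    Executor* SimpleBind() { return new Executor(); }  // stand-in for the bind call

    void train_leaky() {
      Executor* exe = SimpleBind();
      exe->Forward();
      delete exe;     // the fix: without this, the executor and its buffers leak
    }

    void train_raii() {
      std::unique_ptr<Executor> exe(SimpleBind());     // ownership is explicit
      exe->Forward();
    }                                                  // freed automatically, even on early exit

    int main() {
      train_leaky();
      train_raii();
      return 0;
    }

A smart pointer would also cover early-exit paths; the patch keeps the example's raw-pointer style and simply frees the executor explicitly.
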
diff --git a/src/engine/profiler.cc b/src/engine/profiler.cc
index 13f8cca37b..49fecacc6c 100644
--- a/src/engine/profiler.cc
+++ b/src/engine/profiler.cc
@@ -110,7 +110,8 @@ OprExecStat *Profiler::AddOprStat(int dev_type, uint32_t dev_id) {
       idx = cpu_num_ + gpu_num_;
       break;
     default:
-      LOG(FATAL) << "Unknown dev_type: " << dev_type;
+      delete opr_stat;
+      LOG(FATAL) << "Unknown dev_type: " << dev_type;
       return NULL;
   }
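
In AddOprStat the OprExecStat object is allocated earlier in the function; in the default branch the function returns NULL, so the allocation is reported as leaked on that path even though LOG(FATAL) normally does not return. The added delete frees it before bailing out. A simplified sketch of the same pattern, with illustrative names rather than the real Profiler internals:

    #include <cstdio>

    struct OprExecStat { int dev_id; };

    OprExecStat* AddOprStat(int dev_type, int dev_id) {
      OprExecStat* opr_stat = new OprExecStat{dev_id};  // owned by this function until returned
      switch (dev_type) {
        case 1:                                         // a recognized device type
          return opr_stat;                              // ownership passes to the caller
        default:
          delete opr_stat;                              // the fix: free before the error return
          std::fprintf(stderr, "Unknown dev_type: %d\n", dev_type);
          return nullptr;
      }
    }

    int main() {
      OprExecStat* ok = AddOprStat(1, 0);
      delete ok;
      AddOprStat(99, 0);                                // error path, no longer leaks
      return 0;
    }
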
 
diff --git a/src/engine/stream_manager.h b/src/engine/stream_manager.h
index 432bccf27d..68a3e0b3eb 100644
--- a/src/engine/stream_manager.h
+++ b/src/engine/stream_manager.h
@@ -89,10 +89,12 @@ RunContext StreamManager<kNumGpus, kStreams>::GetRunContext(
       break;
 #else
       LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
+      break;
 #endif  // MXNET_USE_CUDA
-    default:
-      LOG(FATAL) << "Not Reached";
     }
+    default:
+      LOG(FATAL) << "Undefined dev_mask " << ctx.dev_mask();
+      break;
   }
   return ret;
 }
@@ -118,10 +120,12 @@ RunContext StreamManager<kNumGpus, kStreams>::GetIORunContext(
       break;
 #else
       LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
+      break;
 #endif  // MXNET_USE_CUDA
-    default:
-      LOG(FATAL) << "Not Reached";
     }
+    default:
+      LOG(FATAL) << "Undefined dev_mask " << ctx.dev_mask();
+      break;
   }
   return ret;
 }
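
In both GetRunContext and GetIORunContext the original "default:" label sat inside the nested block of the GPU case, so the outer switch on ctx.dev_mask() had no real default; the patch moves the label out to the switch level with a clearer message and adds a break so the non-CUDA branch cannot fall through. A simplified sketch of the corrected shape, using a plain int in place of the real Context type:

    #include <iostream>

    enum DeviceMask { kCPU = 1, kGPU = 2 };

    void GetRunContext(int dev_mask) {
      switch (dev_mask) {
        case kCPU:
          std::cout << "cpu stream\n";
          break;
        case kGPU: {
    #if MXNET_USE_CUDA
          std::cout << "gpu stream\n";
          break;
    #else
          std::cout << "GPU support not compiled in\n";
          break;                     // added so this branch does not fall through
    #endif
        }
        default:                     // now a true default of the switch itself
          std::cout << "Undefined dev_mask " << dev_mask << "\n";
          break;
      }
    }

    int main() {
      GetRunContext(kCPU);
      GetRunContext(42);             // exercises the new default branch
      return 0;
    }
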
diff --git a/src/operator/nn/upsampling.cc b/src/operator/nn/upsampling.cc
index 8942e35ab3..95347ac61a 100644
--- a/src/operator/nn/upsampling.cc
+++ b/src/operator/nn/upsampling.cc
@@ -53,6 +53,7 @@ Operator *CreateOp<cpu>(UpSamplingParam param, int dtype) {
       p.stride = TShape(shape, shape + 2);
       shape[0] = shape[1] = pad;
       p.pad = TShape(shape, shape + 2);
+      p.cudnn_off = false;
       op = new DeconvolutionOp<cpu, DType>(p);
     } else {
       LOG(FATAL) << "Unknown sample type";


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services