You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by jx...@apache.org on 2017/11/06 18:13:15 UTC

[incubator-mxnet] branch master updated: * [cpp-package] fix for issue #7725 (#8551)

This is an automated email from the ASF dual-hosted git repository.

jxie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 3f37577  * [cpp-package] fix for issue #7725 (#8551)
3f37577 is described below

commit 3f3757741416a927d63f2204b6ff9f302bba83d6
Author: Ade <Ad...@users.noreply.github.com>
AuthorDate: Tue Nov 7 02:13:07 2017 +0800

    * [cpp-package] fix for issue #7725 (#8551)
    
    * remove some unused include (MxNetCpp.h already included op.h)
    * use same mnist data path avoiding duplication
---
 cpp-package/example/alexnet.cpp               |  3 +-
 cpp-package/example/charRNN.cpp               |  2 -
 cpp-package/example/googlenet.cpp             | 12 +++---
 cpp-package/example/inception_bn.cpp          |  4 +-
 cpp-package/example/lenet.cpp                 |  4 +-
 cpp-package/example/lenet_with_mxdataiter.cpp | 57 +++++++++++++++++++--------
 cpp-package/example/mlp.cpp                   |  3 +-
 cpp-package/example/mlp_cpu.cpp               |  8 ++--
 cpp-package/example/resnet.cpp                |  4 +-
 9 files changed, 55 insertions(+), 42 deletions(-)

diff --git a/cpp-package/example/alexnet.cpp b/cpp-package/example/alexnet.cpp
index 4194b5b..dd5d2b4 100644
--- a/cpp-package/example/alexnet.cpp
+++ b/cpp-package/example/alexnet.cpp
@@ -23,8 +23,7 @@
 #include <map>
 #include <string>
 #include "mxnet-cpp/MxNetCpp.h"
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
+
 
 using namespace std;
 using namespace mxnet::cpp;
diff --git a/cpp-package/example/charRNN.cpp b/cpp-package/example/charRNN.cpp
index f5fff85..218d11e 100644
--- a/cpp-package/example/charRNN.cpp
+++ b/cpp-package/example/charRNN.cpp
@@ -43,8 +43,6 @@
 #include <chrono>
 #include "mxnet-cpp/MxNetCpp.h"
 
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
 
 using namespace std;
 using namespace mxnet::cpp;
diff --git a/cpp-package/example/googlenet.cpp b/cpp-package/example/googlenet.cpp
index ac0585e..fe5dea6 100644
--- a/cpp-package/example/googlenet.cpp
+++ b/cpp-package/example/googlenet.cpp
@@ -22,10 +22,8 @@
 #include <string>
 #include <vector>
 #include <map>
-
 #include "mxnet-cpp/MxNetCpp.h"
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
+
 
 using namespace mxnet::cpp;
 
@@ -159,8 +157,8 @@ int main(int argc, char const *argv[]) {
     train_iter.Reset();
     while (train_iter.Next()) {
       auto data_batch = train_iter.GetDataBatch();
-      args_map["data"] = data_batch.data.Copy(Context::gpu());
-      args_map["data_label"] = data_batch.label.Copy(Context::gpu());
+      data_batch.data.CopyTo(&args_map["data"]);
+      data_batch.label.CopyTo(&args_map["data_label"]);
       NDArray::WaitAll();
       exec->Forward(true);
       exec->Backward();
@@ -174,8 +172,8 @@ int main(int argc, char const *argv[]) {
     val_iter.Reset();
     while (val_iter.Next()) {
       auto data_batch = val_iter.GetDataBatch();
-      args_map["data"] = data_batch.data.Copy(Context::gpu());
-      args_map["data_label"] = data_batch.label.Copy(Context::gpu());
+      data_batch.data.CopyTo(&args_map["data"]);
+      data_batch.label.CopyTo(&args_map["data_label"]);
       NDArray::WaitAll();
       exec->Forward(false);
       NDArray::WaitAll();
diff --git a/cpp-package/example/inception_bn.cpp b/cpp-package/example/inception_bn.cpp
index de21aad..e6f4790 100644
--- a/cpp-package/example/inception_bn.cpp
+++ b/cpp-package/example/inception_bn.cpp
@@ -19,13 +19,11 @@
 
 /*!
  */
-#include <iostream>
 #include <map>
 #include <string>
 #include <vector>
 #include "mxnet-cpp/MxNetCpp.h"
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
+
 
 using namespace mxnet::cpp;
 
diff --git a/cpp-package/example/lenet.cpp b/cpp-package/example/lenet.cpp
index 05cc451..4c5a1f1 100644
--- a/cpp-package/example/lenet.cpp
+++ b/cpp-package/example/lenet.cpp
@@ -19,14 +19,12 @@
 
 /*!
  */
-#include <iostream>
 #include <fstream>
 #include <map>
 #include <string>
 #include <vector>
 #include "mxnet-cpp/MxNetCpp.h"
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
+
 
 using namespace std;
 using namespace mxnet::cpp;
diff --git a/cpp-package/example/lenet_with_mxdataiter.cpp b/cpp-package/example/lenet_with_mxdataiter.cpp
index 077f556..04f5cbc 100644
--- a/cpp-package/example/lenet_with_mxdataiter.cpp
+++ b/cpp-package/example/lenet_with_mxdataiter.cpp
@@ -19,14 +19,12 @@
 
 /*!
  */
-#include <iostream>
-#include <fstream>
 #include <map>
 #include <string>
 #include <vector>
+#include <chrono>
 #include "mxnet-cpp/MxNetCpp.h"
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
+
 
 using namespace std;
 using namespace mxnet::cpp;
@@ -89,15 +87,15 @@ int main(int argc, char const *argv[]) {
   args_map["fc2_b"] = 0;
 
   auto train_iter = MXDataIter("MNISTIter")
-      .SetParam("image", "./train-images-idx3-ubyte")
-      .SetParam("label", "./train-labels-idx1-ubyte")
+      .SetParam("image", "./mnist_data/train-images-idx3-ubyte")
+      .SetParam("label", "./mnist_data/train-labels-idx1-ubyte")
       .SetParam("batch_size", batch_size)
       .SetParam("shuffle", 1)
       .SetParam("flat", 0)
       .CreateDataIter();
   auto val_iter = MXDataIter("MNISTIter")
-      .SetParam("image", "./t10k-images-idx3-ubyte")
-      .SetParam("label", "./t10k-labels-idx1-ubyte")
+      .SetParam("image", "./mnist_data/t10k-images-idx3-ubyte")
+      .SetParam("label", "./mnist_data/t10k-labels-idx1-ubyte")
       .CreateDataIter();
 
   Optimizer* opt = OptimizerRegistry::Find("ccsgd");
@@ -111,35 +109,62 @@ int main(int argc, char const *argv[]) {
   auto *exec = lenet.SimpleBind(Context::gpu(), args_map);
   auto arg_names = lenet.ListArguments();
 
+  // Create metrics
+  Accuracy train_acc, val_acc;
+
   for (int iter = 0; iter < max_epoch; ++iter) {
-    LG << "Epoch: " << iter;
-    train_iter.Reset();
-    while (train_iter.Next()) {
+      int samples = 0;
+      train_iter.Reset();
+      train_acc.Reset();
+
+      auto tic = chrono::system_clock::now();
+
+     while (train_iter.Next()) {
+      samples += batch_size;
       auto data_batch = train_iter.GetDataBatch();
-      args_map["data"] = data_batch.data.Copy(Context::gpu());
-      args_map["data_label"] = data_batch.label.Copy(Context::gpu());
+
+      data_batch.data.CopyTo(&args_map["data"]);
+      data_batch.label.CopyTo(&args_map["data_label"]);
       NDArray::WaitAll();
+
+      // Compute gradients
       exec->Forward(true);
       exec->Backward();
+
       // Update parameters
       for (size_t i = 0; i < arg_names.size(); ++i) {
         if (arg_names[i] == "data" || arg_names[i] == "data_label") continue;
         opt->Update(i, exec->arg_arrays[i], exec->grad_arrays[i]);
       }
+
+      // Update metric
+      train_acc.Update(data_batch.label, exec->outputs[0]);
     }
 
+     // one epoch of training is finished
+     auto toc = chrono::system_clock::now();
+     float duration = chrono::duration_cast<chrono::milliseconds>(toc - tic).count() / 1000.0;
+     LG << "Epoch[" << iter << "] " << samples / duration
+         << " samples/sec " << "Train-Accuracy=" << train_acc.Get();
+
+      val_iter.Reset();
+      val_acc.Reset();
+
     Accuracy acu;
     val_iter.Reset();
     while (val_iter.Next()) {
       auto data_batch = val_iter.GetDataBatch();
-      args_map["data"] = data_batch.data.Copy(Context::gpu());
-      args_map["data_label"] = data_batch.label.Copy(Context::gpu());
+      data_batch.data.CopyTo(&args_map["data"]);
+      data_batch.label.CopyTo(&args_map["data_label"]);
       NDArray::WaitAll();
+
+      // Only forward pass is enough as no gradient is needed when evaluating
       exec->Forward(false);
       NDArray::WaitAll();
       acu.Update(data_batch.label, exec->outputs[0]);
+      val_acc.Update(data_batch.label, exec->outputs[0]);
     }
-    LG << "Accuracy: " << acu.Get();
+    LG << "Epoch[" << iter << "] Val-Accuracy=" << val_acc.Get();
   }
 
   delete exec;
diff --git a/cpp-package/example/mlp.cpp b/cpp-package/example/mlp.cpp
index c9c4ff2..b40328d 100644
--- a/cpp-package/example/mlp.cpp
+++ b/cpp-package/example/mlp.cpp
@@ -24,8 +24,7 @@
 #include <vector>
 #include <string>
 #include "mxnet-cpp/MxNetCpp.h"
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
+
 
 using namespace std;
 using namespace mxnet::cpp;
diff --git a/cpp-package/example/mlp_cpu.cpp b/cpp-package/example/mlp_cpu.cpp
index 748c32e..051bad1 100644
--- a/cpp-package/example/mlp_cpu.cpp
+++ b/cpp-package/example/mlp_cpu.cpp
@@ -106,8 +106,8 @@ int main(int argc, char** argv) {
       samples += batch_size;
       auto data_batch = train_iter.GetDataBatch();
       // Set data and label
-      args["X"] = data_batch.data;
-      args["label"] = data_batch.label;
+      data_batch.data.CopyTo(&args["X"]);
+      data_batch.label.CopyTo(&args["label"]);
 
       // Compute gradients
       exec->Forward(true);
@@ -124,8 +124,8 @@ int main(int argc, char** argv) {
     val_iter.Reset();
     while (val_iter.Next()) {
       auto data_batch = val_iter.GetDataBatch();
-      args["X"] = data_batch.data;
-      args["label"] = data_batch.label;
+      data_batch.data.CopyTo(&args["X"]);
+      data_batch.label.CopyTo(&args["label"]);
       // Forward pass is enough as no gradient is needed when evaluating
       exec->Forward(false);
       acc.Update(data_batch.label, exec->outputs[0]);
diff --git a/cpp-package/example/resnet.cpp b/cpp-package/example/resnet.cpp
index ca5643d..03b3d72 100644
--- a/cpp-package/example/resnet.cpp
+++ b/cpp-package/example/resnet.cpp
@@ -19,13 +19,11 @@
 
 /*!
  */
-#include <iostream>
 #include <map>
 #include <string>
 #include <vector>
 #include "mxnet-cpp/MxNetCpp.h"
-// Allow IDE to parse the types
-#include "../include/mxnet-cpp/op.h"
+
 
 using namespace mxnet::cpp;
 

-- 
To stop receiving notification emails like this one, please contact
['"commits@mxnet.apache.org" <co...@mxnet.apache.org>'].