Posted to commits@mxnet.apache.org by qk...@apache.org on 2017/08/10 15:19:32 UTC

[incubator-mxnet] branch master updated: [R][MISC] update Makefile/Jenkinsfile; use mx.ctx.default() in R test (#7401)

This is an automated email from the ASF dual-hosted git repository.

qkou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 613132e  [R][MISC] update Makefile/Jenkinsfile; use mx.ctx.default() in R test (#7401)
613132e is described below

commit 613132e2b6c6573b184b467aed5cf50dbb91aa47
Author: Qiang Kou (KK) <qk...@qkou.info>
AuthorDate: Thu Aug 10 15:19:28 2017 +0000

    [R][MISC] update Makefile/Jenkinsfile; use mx.ctx.default() in R test (#7401)
---
 Jenkinsfile                             |   4 +-
 Makefile                                |   1 +
 R-package/README.md                     |   6 --
 R-package/tests/testthat.R              |   4 --
 R-package/tests/testthat/get_data.R     |  13 ++++
 R-package/tests/testthat/test_img_seg.R |   7 +-
 R-package/tests/testthat/test_lstm.R    |   7 +-
 R-package/tests/testthat/test_model.R   | 117 +++++++++++++++++++++++++++++---
 R-package/tests/testthat/test_ndarray.R |   9 ++-
 example/captcha/README.md               |   2 +-
 10 files changed, 142 insertions(+), 28 deletions(-)
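In short, the R test files now pick their device through mx.ctx.default(), gated by the R_GPU_ENABLE environment variable that the updated Jenkinsfile passes to `make rpkgtest`. A minimal sketch of the pattern, outside the diff (assumes an installed mxnet R package; mx.gpu() requires a CUDA-enabled build):

    library(mxnet)

    # same guard the tests use: keep the CPU default unless R_GPU_ENABLE=1
    if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) {
      mx.ctx.default(new = mx.gpu())
      message("Using GPU for testing.")
    }

    # code that previously hard-coded mx.cpu() now reads the shared default context
    mat <- mx.nd.array(as.array(1:10), mx.ctx.default())
    as.array(mat + 1)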

diff --git a/Jenkinsfile b/Jenkinsfile
index 370c2b3..632789a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -325,11 +325,11 @@ try {
             init_git()
             unpack_lib('gpu')
             timeout(time: max_time, unit: 'MINUTES') {
-              sh "${docker_run} cpu rm -rf .Renviron"
+              sh "${docker_run} gpu rm -rf .Renviron"
               sh "${docker_run} gpu mkdir -p /workspace/ut-r-gpu/site-library"
               sh "${docker_run} gpu make rpkg USE_BLAS=openblas R_LIBS=/workspace/ut-r-gpu/site-library"
               sh "${docker_run} gpu R CMD INSTALL --library=/workspace/ut-r-gpu/site-library mxnet_current_r.tar.gz"
-              sh "${docker_run} gpu make rpkgtest R_LIBS=/workspace/ut-r-gpu/site-library"
+              sh "${docker_run} gpu make rpkgtest R_LIBS=/workspace/ut-r-gpu/site-library R_GPU_ENABLE=1"
             }
           }
         }
diff --git a/Makefile b/Makefile
index ed74214..560b77a 100644
--- a/Makefile
+++ b/Makefile
@@ -398,6 +398,7 @@ rpkg:
 	Rscript -e "require(roxygen2); roxygen2::roxygenise('R-package')"
 	R CMD build --no-build-vignettes R-package
 	rm -rf mxnet_current_r.tar.gz
+	rm -rf R-package/src/image_recordio.h
 	mv mxnet_*.tar.gz mxnet_current_r.tar.gz
 
 rpkgtest:
diff --git a/R-package/README.md b/R-package/README.md
index e150f17..6576700 100644
--- a/R-package/README.md
+++ b/R-package/README.md
@@ -1,7 +1,5 @@
 <img src=https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/mxnetR.png width=155/> Deep Learning for R
 ==========================
-[![Build Status](https://travis-ci.org/dmlc/mxnet.svg?branch=master)](https://travis-ci.org/dmlc/mxnet)
-[![Documentation Status](https://readthedocs.org/projects/mxnet/badge/?version=latest)](http://mxnet.readthedocs.io/en/latest/api/r/index.html)
 
 You have found MXNet R Package! The MXNet R packages brings flexible and efficient GPU
 computing and state-of-art deep learning to R.
@@ -12,10 +10,6 @@ computing and state-of-art deep learning to R.
 
 Sounds exciting? This page contains links to all the related documentation of the R package.
 
-Resources
----------
-* [MXNet R Package Document](http://mxnet.io/get_started/install.html)
-  - Check this out for detailed documents, examples and installation guides.
 
 Installation
 ------------
diff --git a/R-package/tests/testthat.R b/R-package/tests/testthat.R
deleted file mode 100644
index f002e4c..0000000
--- a/R-package/tests/testthat.R
+++ /dev/null
@@ -1,4 +0,0 @@
-library(testthat)
-library(mxnet)
-
-test_check("mxnet")
diff --git a/R-package/tests/testthat/get_data.R b/R-package/tests/testthat/get_data.R
index 555e5e9..6d8de85 100644
--- a/R-package/tests/testthat/get_data.R
+++ b/R-package/tests/testthat/get_data.R
@@ -92,3 +92,16 @@ GetISBI_data <- function() {
     file.remove('data/ISBI.zip')
   }
 }
+
+GetCaptcha_data <- function() {
+  if (!dir.exists("data")) {
+    dir.create("data/")
+  }
+  if (!file.exists('data/captcha_example/captcha_train.rec') |
+      !file.exists('data/captcha_example/captcha_test.rec')) {
+    download.file('https://s3-us-west-2.amazonaws.com/apache-mxnet/R/data/captcha_example.zip',
+                  destfile = 'data/captcha_example.zip')
+    unzip('data/captcha_example.zip', exdir = 'data/')
+    file.remove('data/captcha_example.zip')
+  }
+}
diff --git a/R-package/tests/testthat/test_img_seg.R b/R-package/tests/testthat/test_img_seg.R
index ba5c9cd..fbca92e 100644
--- a/R-package/tests/testthat/test_img_seg.R
+++ b/R-package/tests/testthat/test_img_seg.R
@@ -2,6 +2,11 @@ require(mxnet)
 
 source("get_data.R")
 
+if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) {
+  mx.ctx.default(new = mx.gpu())
+  message("Using GPU for testing.")
+}
+
 print_inferred_shape <- function(net) {
   slist <- mx.symbol.infer.shape(symbol = net, data = c(168, 168, 1, 2))
   print(slist$out.shapes)
@@ -116,7 +121,7 @@ test_that("UNET", {
   train.y.array = train.y
   dim(train.y.array) = c(IMG_SIZE, IMG_SIZE, 1, 30)
   
-  devices <- mx.cpu()
+  devices <- mx.ctx.default()
   mx.set.seed(0)
   
   net <- get_unet()
diff --git a/R-package/tests/testthat/test_lstm.R b/R-package/tests/testthat/test_lstm.R
index 24b1a59..4a5cdbe 100644
--- a/R-package/tests/testthat/test_lstm.R
+++ b/R-package/tests/testthat/test_lstm.R
@@ -1,5 +1,10 @@
 require(mxnet)
 
+if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) {
+  mx.ctx.default(new = mx.gpu())
+  message("Using GPU for testing.")
+}
+
 context("lstm models")
 
 get.nll <- function(s) {
@@ -26,7 +31,7 @@ test_that("training error decreasing", {
     X.train <- list(data=array(1:16, dim=c(2,8)), label=array(2:17, dim=c(2,8)))
 
     s <- capture.output(model <- mx.lstm( X.train, 
-                                          ctx=mx.cpu(),
+                                          ctx=mx.ctx.default(),
                                           num.round=num.round, 
                                           update.period=update.period,
                                           num.lstm.layer=num.lstm.layer, 
diff --git a/R-package/tests/testthat/test_model.R b/R-package/tests/testthat/test_model.R
index 73a2127..8cdd396 100644
--- a/R-package/tests/testthat/test_model.R
+++ b/R-package/tests/testthat/test_model.R
@@ -4,6 +4,11 @@ source("get_data.R")
 
 context("models")
 
+if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) {
+  mx.ctx.default(new = mx.gpu())
+  message("Using GPU for testing.")
+}
+
 test_that("MNIST", {
 #   # Network configuration
    GetMNIST_ubyte()
@@ -36,13 +41,10 @@ test_that("MNIST", {
      silent=0)
    
    mx.set.seed(0)
-   devices = lapply(1:2, function(i) {
-     mx.cpu(i)
-   })
-   
+
    # create the model
    model <- mx.model.FeedForward.create(softmax, X=dtrain, eval.data=dtest,
-                                        ctx=devices, num.round=1,
+                                        ctx = mx.ctx.default(), num.round=1,
                                         learning.rate=0.1, momentum=0.9,
                                         initializer=mx.init.uniform(0.07),
                                         epoch.end.callback=mx.callback.save.checkpoint("chkpt"),
@@ -83,12 +85,30 @@ test_that("Regression", {
   })
   mx.set.seed(0)
   model <- mx.model.FeedForward.create(lro, X = train.x, y = train.y,
-                                       ctx = mx.cpu(), num.round = 5,
+                                       ctx = mx.ctx.default(), num.round = 5,
                                        array.batch.size = 20,
                                        learning.rate = 2e-6,
                                        momentum = 0.9,
                                        eval.metric = demo.metric.mae)
   
+  train.x <- data.matrix(BostonHousing[train.ind, -(13:14)])
+  train.y <- BostonHousing[train.ind, c(13:14)]
+  test.x <- data.matrix(BostonHousing[-train.ind, -(13:14)])
+  test.y <- BostonHousing[-train.ind, c(13:14)]
+  
+  data <- mx.symbol.Variable("data")
+  fc2 <- mx.symbol.FullyConnected(data, num_hidden=2)
+  lro2 <- mx.symbol.LinearRegressionOutput(fc2)
+  
+  mx.set.seed(0)
+  train_iter = mx.io.arrayiter(data = t(train.x), label = t(train.y))
+  
+  model <- mx.model.FeedForward.create(lro2, X = train_iter,
+                                       ctx = mx.ctx.default(),
+                                       num.round = 50,
+                                       array.batch.size = 20,
+                                       learning.rate = 2e-6,
+                                       momentum = 0.9)
 })
 
 
@@ -141,7 +161,7 @@ test_that("Fine-tune", {
   arg_params_new[["fc1_bias"]] <- fc1_bias_new
 
   #model <- mx.model.FeedForward.create(symbol = new_soft, X = train_iter, eval.data = val_iter,
-  #                                     ctx = mx.cpu(), eval.metric = mx.metric.accuracy,
+  #                                     ctx = mx.ctx.default(), eval.metric = mx.metric.accuracy,
   #                                     num.round = 2, learning.rate = 0.05, momentum = 0.9,
   #                                     wd = 0.00001, kvstore = "local",
   #                                     batch.end.callback = mx.callback.log.train.metric(50),
@@ -171,9 +191,7 @@ test_that("Matrix Factorization", {
   pred1 <- mx.symbol.sum_axis(pred, axis = 1, name = "pred1")
   pred2 <- mx.symbol.Flatten(pred1, name = "pred2")
   pred3 <- mx.symbol.LinearRegressionOutput(data = pred2, label = score, name = "pred3")
-  devices = lapply(1:2, function(i) {
-    mx.cpu(i)
-  })
+
   mx.set.seed(123)
   
   CustomIter <- setRefClass( "CustomIter", fields = c("iter1", "iter2"),
@@ -216,7 +234,7 @@ test_that("Matrix Factorization", {
   
   train_iter <- CustomIter$new(user_iter, item_iter)
   
-  model <- mx.model.FeedForward.create(pred3, X = train_iter, ctx = devices,
+  model <- mx.model.FeedForward.create(pred3, X = train_iter, ctx = mx.ctx.default(),
                                        num.round = 5, initializer = mx.init.uniform(0.07),
                                        learning.rate = 0.07,
                                        eval.metric = mx.metric.rmse,
@@ -225,3 +243,80 @@ test_that("Matrix Factorization", {
                                        input.names = c("user", "item"),
                                        output.names = "score")
 })
+
+test_that("Captcha", {
+  GetCaptcha_data()
+  data.shape <- c(80, 30, 3)
+  batch_size <- 40
+  train <- mx.io.ImageRecordIter(
+    path.imgrec   = "./data/captcha_example/captcha_train.rec",
+    path.imglist  = "./data/captcha_example/captcha_train.lst",
+    batch.size    = batch_size,
+    label.width   = 4,
+    data.shape    = data.shape,
+    mean.img      = "mean.bin")
+  
+  val <- mx.io.ImageRecordIter(
+    path.imgrec   = "./data/captcha_example/captcha_test.rec",
+    path.imglist  = "./data/captcha_example/captcha_test.lst",
+    batch.size    = batch_size,
+    label.width   = 4,
+    data.shape    = data.shape,
+    mean.img      = "mean.bin")
+  
+  data <- mx.symbol.Variable("data")
+  label <- mx.symbol.Variable("label")
+  conv1 <- mx.symbol.Convolution(data = data, kernel = c(5, 5), num_filter = 32)
+  pool1 <- mx.symbol.Pooling(data = conv1, pool_type = "max", kernel = c(2, 2), stride = c(1, 1))
+  relu1 <- mx.symbol.Activation(data = pool1, act_type = "relu")
+  
+  conv2 <- mx.symbol.Convolution(data = relu1, kernel = c(5, 5), num_filter = 32)
+  pool2 <- mx.symbol.Pooling(data = conv2, pool_type = "avg", kernel = c(2, 2), stride = c(1, 1))
+  relu2 <- mx.symbol.Activation(data = pool2, act_type = "relu")
+  
+  flatten <- mx.symbol.Flatten(data = relu2)
+  fc1 <- mx.symbol.FullyConnected(data = flatten, num_hidden = 120)
+  fc21 <- mx.symbol.FullyConnected(data = fc1, num_hidden = 10)
+  fc22 <- mx.symbol.FullyConnected(data = fc1, num_hidden = 10)
+  fc23 <- mx.symbol.FullyConnected(data = fc1, num_hidden = 10)
+  fc24 <- mx.symbol.FullyConnected(data = fc1, num_hidden = 10)
+  fc2 <- mx.symbol.Concat(c(fc21, fc22, fc23, fc24), dim = 0, num.args = 4)
+  label <- mx.symbol.transpose(data = label)
+  label <- mx.symbol.Reshape(data = label, target_shape = c(0))
+  captcha_net <- mx.symbol.SoftmaxOutput(data = fc2, label = label, name = "softmax")
+  
+  mx.metric.acc2 <- mx.metric.custom("accuracy", function(label, pred) {
+    ypred <- max.col(t(pred)) - 1
+    ypred <- matrix(ypred, nrow = nrow(label), ncol = ncol(label), byrow = TRUE)
+    return(sum(colSums(label == ypred) == 4)/ncol(label))
+  })
+  
+  mx.set.seed(42)
+  
+  train$reset()
+  train$iter.next()
+  
+  input.names <- "data"
+  input.shape <- sapply(input.names, function(n){dim(train$value()[[n]])}, simplify = FALSE)
+  arg_names <- arguments(captcha_net)
+  output.names <- "label"
+  output.shape <- sapply(output.names, function(n){dim(train$value()[[n]])}, simplify = FALSE)
+  params <- mx.model.init.params(captcha_net, input.shape, output.shape, 
+                                 mx.init.Xavier(factor_type = "in", magnitude = 2.34),
+                                 mx.cpu())
+
+  #model <- mx.model.FeedForward.create(
+  #  X                  = train,
+  #  eval.data          = val,
+  #  ctx                = mx.ctx.default(),
+  #  symbol             = captcha_net,
+  #  eval.metric        = mx.metric.acc2,
+  #  num.round          = 1,
+  #  learning.rate      = 1e-04,
+  #  momentum           = 0.9,
+  #  wd                 = 1e-05,
+  #  batch.end.callback = mx.callback.log.train.metric(50),
+  #  initializer        = mx.init.Xavier(factor_type = "in", magnitude = 2.34),
+  #  optimizer          = "sgd",
+  #  clip_gradient      = 10)
+})
diff --git a/R-package/tests/testthat/test_ndarray.R b/R-package/tests/testthat/test_ndarray.R
index 142c87e..326ea6c 100644
--- a/R-package/tests/testthat/test_ndarray.R
+++ b/R-package/tests/testthat/test_ndarray.R
@@ -2,9 +2,14 @@ require(mxnet)
 
 context("ndarray")
 
+if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == 1) {
+  mx.ctx.default(new = mx.gpu())
+  message("Using GPU for testing.")
+}
+
 test_that("element-wise calculation for vector", {
   x = 1:10
-  mat = mx.nd.array(as.array(x), mx.cpu(0))
+  mat = mx.nd.array(as.array(x), mx.ctx.default())
   expect_equal(x, as.array(mat))
   expect_equal(x + 1, as.array(mat + 1))
   expect_equal(x - 10, as.array(mat - 10))
@@ -26,7 +31,7 @@ test_that("element-wise calculation for vector", {
 
 test_that("element-wise calculation for matrix", {
   x = matrix(1:4, 2, 2)
-  mat = mx.nd.array(as.array(x), mx.cpu(0))
+  mat = mx.nd.array(as.array(x), mx.ctx.default())
   expect_equal(x, as.array(mat))
   expect_equal(x + 1, as.array(mat + 1))
   expect_equal(x - 10, as.array(mat - 10))
diff --git a/example/captcha/README.md b/example/captcha/README.md
index 588d626..02e8726 100644
--- a/example/captcha/README.md
+++ b/example/captcha/README.md
@@ -2,4 +2,4 @@ This is the R version of [captcha recognition](http://blog.xlvector.net/2016-05/
 
 ![](captcha_example.png)
 
-You can download the images and `.rec` files from [here](https://drive.google.com/open?id=0B_52ppM3wSXBdHctQmhUdmlTbDQ). Since each image has 4 labels, please remember to use `label_width=4` when generating the `.rec` files.
+You can download the images and `.rec` files from [here](https://s3-us-west-2.amazonaws.com/apache-mxnet/R/data/captcha_example.zip). Since each image has 4 labels, please remember to use `label_width=4` when generating the `.rec` files.

-- 
To stop receiving notification emails like this one, please contact
"commits@mxnet.apache.org" <co...@mxnet.apache.org>.