You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by cj...@apache.org on 2017/12/06 03:34:50 UTC

[incubator-mxnet] branch master updated: [R] various small changes for efficiency and robustness in R package (#8118)

This is an automated email from the ASF dual-hosted git repository.

cjolivier01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new b909769  [R] various small changes for efficiency and robustness in R package (#8118)
b909769 is described below

commit b909769be11237e808c18e8afff55e7ab8877be9
Author: Bernie Gray <bf...@users.noreply.github.com>
AuthorDate: Tue Dec 5 22:34:42 2017 -0500

    [R] various small changes for efficiency and robustness in R package (#8118)
    
    * various small changes for efficiency and robustness in r package
    
    * remove library(methods)
    
    * general cleanup
    
    * revert whitespace
    
    * various reversions
---
 R-package/R/callback.R          | 22 +++++------
 R-package/R/initializer.R       | 21 ++++-------
 R-package/R/lr_scheduler.R      | 16 +++-----
 R-package/R/metric.R            |  2 +-
 R-package/R/mlp.R               | 16 +++-----
 R-package/R/model.R             | 84 ++++++++++++++++++-----------------------
 R-package/R/model.rnn.R         | 42 ++++++++++-----------
 R-package/R/mx.io.bucket.iter.R | 22 +++++------
 R-package/R/optimizer.R         | 28 +++++---------
 R-package/R/rnn.graph.R         | 44 ++++++++++-----------
 R-package/R/rnn.infer.R         |  4 +-
 R-package/R/util.R              |  2 +-
 R-package/R/viz.graph.R         | 24 ++++++------
 13 files changed, 142 insertions(+), 185 deletions(-)

diff --git a/R-package/R/callback.R b/R-package/R/callback.R
index a056238..bc96d77 100644
--- a/R-package/R/callback.R
+++ b/R-package/R/callback.R
@@ -11,8 +11,8 @@ mx.callback.log.train.metric <- function(period, logger=NULL) {
   function(iteration, nbatch, env, verbose=TRUE) {
     if (nbatch %% period == 0 && !is.null(env$metric)) {
       result <- env$metric$get(env$train.metric)
-      if (nbatch != 0 & verbose)
-        message(paste0("Batch [", nbatch, "] Train-", result$name, "=", result$value))
+      if (nbatch != 0 && verbose)
+        message("Batch [", nbatch, "] Train-", result$name, "=", result$value)
       if (!is.null(logger)) {
         if (class(logger) != "mx.metric.logger") {
           stop("Invalid mx.metric.logger.")
@@ -20,8 +20,8 @@ mx.callback.log.train.metric <- function(period, logger=NULL) {
         logger$train <- c(logger$train, result$value)
         if (!is.null(env$eval.metric)) {
           result <- env$metric$get(env$eval.metric)
-          if (nbatch != 0 & verbose)
-            message(paste0("Batch [", nbatch, "] Validation-", result$name, "=", result$value))
+          if (nbatch != 0 && verbose)
+            message("Batch [", nbatch, "] Validation-", result$name, "=", result$value)
           logger$eval <- c(logger$eval, result$value)
         }
       }
@@ -48,9 +48,9 @@ mx.callback.log.speedometer <- function(batch.size, frequency=50){
         time <- as.double(difftime(Sys.time(), env$tic, units = "secs"))
         speed <- frequency*batch.size/time
         result <- env$metric$get(env$train.metric)
-        if (nbatch != 0 & verbose)
-          message(paste0("Batch [", nbatch, "] Speed: ", speed, " samples/sec Train-",
-                     result$name, "=", result$value))
+        if (nbatch != 0 && verbose)
+          message("Batch [", nbatch, "] Speed: ", speed, " samples/sec Train-",
+                     result$name, "=", result$value)
         env$tic = Sys.time()
       }      
     } else {
@@ -95,7 +95,7 @@ mx.callback.early.stop <- function(train.metric = NULL, eval.metric = NULL, bad.
     if (!is.null(env$metric)) {
       if (!is.null(train.metric)) {
         result <- env$metric$get(env$train.metric)
-        if ((maximize == F & result$value < train.metric) | (maximize == TRUE & result$value > train.metric)) {
+        if ((! maximize && result$value < train.metric) || (maximize && result$value > train.metric)) {
           return(FALSE)
         }
       }
@@ -104,7 +104,7 @@ mx.callback.early.stop <- function(train.metric = NULL, eval.metric = NULL, bad.
       if (!is.null(eval.metric)) {
         if (!is.null(env$eval.metric)) {
           result <- env$metric$get(env$eval.metric)
-          if ((maximize == F & result$value < eval.metric) | (maximize == TRUE & result$value > eval.metric)) {
+          if ((!maximize && result$value < eval.metric) || (maximize && result$value > eval.metric)) {
             return(FALSE)
           }
         }
@@ -135,11 +135,11 @@ mx.callback.early.stop <- function(train.metric = NULL, eval.metric = NULL, bad.
         
         result <- env$metric$get(env$eval.metric)
         
-        if ((maximize == F & result$value > mx.best.score) | (maximize == TRUE & result$value < mx.best.score)) {
+        if ((! maximize && result$value > mx.best.score) || (maximize && result$value < mx.best.score)) {
           
           if (mx.best.iter == bad.steps) {
             if (verbose) {
-              message(paste0("Best score=", mx.best.score, ", iteration [", iteration - bad.steps, "]"))
+              message("Best score=", mx.best.score, ", iteration [", iteration - bad.steps, "]")
             }
             return(FALSE)
           } else {
diff --git a/R-package/R/initializer.R b/R-package/R/initializer.R
index 9f5e75b..4071243 100644
--- a/R-package/R/initializer.R
+++ b/R-package/R/initializer.R
@@ -61,17 +61,12 @@ mx.init.Xavier <- function(rnd_type = "uniform", factor_type = "avg",
     
     fan_out = shape[length(shape)]
     fan_in  = prod(shape[-length(shape)])
-    factor_val  = 1
-    if (factor_type == "avg") {
-      factor_val = (fan_in + fan_out) / 2
-    } else if (factor_type == "in"){
-      factor_val = fan_in
-    } else if (factor_type == "out"){
-      factor_val = fan_out
-    } else {
-      stop("Not supported factor type. See usage of function mx.init.Xavier")
-    }
-    
+    factor_val <- switch(factor_type,
+                         "avg" = (fan_in + fan_out) / 2,
+                         "in" = fan_in,
+                         "out" = fan_out,
+                         stop("Not supported factor type. See usage of function mx.init.Xavier"))
+
     scale = sqrt(magnitude / factor_val)
     
     if (rnd_type == "uniform"){
@@ -95,9 +90,7 @@ mx.init.Xavier <- function(rnd_type = "uniform", factor_type = "avg",
 mx.init.create <- function(initializer, shape.array, ctx=NULL, skip.unknown=TRUE) {
   if (length(shape.array) == 0) return(list())
   names = names(shape.array)
-  ret <- lapply(1 : length(names), function(i) {
-    initializer(names[[i]], shape.array[[i]], ctx, allow.unknown=skip.unknown)
-  })
+  ret <- lapply(seq_along(names), function(i) initializer(names[[i]], shape.array[[i]], ctx, allow.unknown=skip.unknown))
   names(ret) <- names
   if (skip.unknown) {
     ret <- mx.util.filter.null(ret)
diff --git a/R-package/R/lr_scheduler.R b/R-package/R/lr_scheduler.R
index bc89721..8b032cd 100644
--- a/R-package/R/lr_scheduler.R
+++ b/R-package/R/lr_scheduler.R
@@ -19,12 +19,10 @@ mx.lr_scheduler.FactorScheduler <- function(step, factor_val, stop_factor_lr=1e-
       lr    <- lr * factor_val
       if(lr < stop_factor_lr){
         lr <- stop_factor_lr
-        if(verbose) message(paste0("Update[", num_update, 
-                               "]: now learning rate arrived at ", lr, 
-                               "will not change in the future"))
+        if(verbose) message("Update[", num_update, "]: now learning rate arrived at ",
+                            lr, "will not change in the future")
       } else{
-        if(verbose) message(paste0("Update[", num_update, 
-                               "]: learning rate is changed to ", lr))
+        if(verbose) message("Update[", num_update, "]: learning rate is changed to ", lr)
       }
       optimizerEnv$lr    <- lr
       optimizerEnv$count <- count      
@@ -62,12 +60,10 @@ mx.lr_scheduler.MultiFactorScheduler <- function(step, factor_val, stop_factor_l
         lr <-  lr * factor_val
         if(lr < stop_factor_lr){
           lr <- stop_factor_lr
-          if(verbose) message(paste0("Update[", num_update, 
-                                 "]: now learning rate arrived at ", lr, 
-                                 "will not change in the future"))
+          if(verbose) message("Update[", num_update, "]: now learning rate arrived at ",
+                              lr, "will not change in the future")
         } else{
-          if(verbose) message(paste0("Update[", num_update, 
-                                 "]: learning rate is changed to ", lr))
+          if(verbose) message("Update[", num_update, "]: learning rate is changed to ", lr)
           
         }
         optimizerEnv$lr           <- lr
diff --git a/R-package/R/metric.R b/R-package/R/metric.R
index 02572f4..f8d9c33 100644
--- a/R-package/R/metric.R
+++ b/R-package/R/metric.R
@@ -39,7 +39,7 @@ mx.metric.top_k_accuracy <- mx.metric.custom("top_k_accuracy", function(label, p
   if(top_k == 1){
     return(mx.metric.accuracy(label,pred))
   } else{
-    ypred <- apply(pred,2,function(x) order(x, decreasing=TRUE)[1:top_k])
+    ypred <- apply(pred,2,function(x) order(x, decreasing=TRUE)[seq_len(top_k)])
     ans <- apply(ypred, 2, is.num.in.vect, num = as.array(label + 1))
     acc <- sum(ans)/length(label)  
     return(acc)
diff --git a/R-package/R/mlp.R b/R-package/R/mlp.R
index 33134ff..ecc3099 100644
--- a/R-package/R/mlp.R
+++ b/R-package/R/mlp.R
@@ -47,7 +47,7 @@ mx.mlp <- function(data, label, hidden_node = 1, out_node, dropout = NULL,
       stop(paste("Length of activation should be",m))
     }
   }
-  for (i in 1:m) {
+  for (i in seq_len(m)) {
     fc <- mx.symbol.FullyConnected(act, num_hidden=hidden_node[i])
     act <- mx.symbol.Activation(fc, act_type=activation[i])
     if (i == m && !is.null(dropout)) {
@@ -55,15 +55,11 @@ mx.mlp <- function(data, label, hidden_node = 1, out_node, dropout = NULL,
     }
   }
   fc <- mx.symbol.FullyConnected(act, num_hidden=out_node)
-  if (out_activation == "rmse") {
-    out <- mx.symbol.LinearRegressionOutput(fc)
-  } else if (out_activation == "softmax") {
-    out <- mx.symbol.SoftmaxOutput(fc)
-  } else if (out_activation == "logistic") {
-    out <- mx.symbol.LogisticRegressionOutput(fc)
-  } else {
-    stop("Not supported yet.")
-  }
+  out <- switch(out_activation,
+                "rmse" = mx.symbol.LinearRegressionOutput(fc),
+                "softmax" = mx.symbol.SoftmaxOutput(fc),
+                "logistic" = mx.symbol.LogisticRegressionOutput(fc),
+                stop("Not supported yet."))
   model <- mx.model.FeedForward.create(out, X=data, y=label, ctx = ctx, ...)
   return(model)
 }
diff --git a/R-package/R/model.R b/R-package/R/model.R
index f607ebb..01b5ed7 100644
--- a/R-package/R/model.R
+++ b/R-package/R/model.R
@@ -4,7 +4,7 @@ mx.model.slice.shape <- function(shape, nsplit) {
     ndim <- length(shape)
     batchsize <- shape[[ndim]]
     step <- as.integer((batchsize + nsplit - 1) / nsplit)
-    lapply(0:(nsplit - 1), function(k) {
+    lapply(seq_len(nsplit) - 1, function(k) {
       begin = min(k * step, batchsize)
       end = min((k + 1) * step, batchsize)
       s <- shape
@@ -16,7 +16,7 @@ mx.model.slice.shape <- function(shape, nsplit) {
     ndim <- length(shape[[1]])
     batchsize <- shape[[1]][[ndim]]
     step <- as.integer((batchsize + nsplit - 1) / nsplit)
-    lapply(0:(nsplit - 1), function(k) {
+    lapply(seq_len(nsplit) - 1, function(k) {
       begin = min(k * step, batchsize)
       end = min((k + 1) * step, batchsize)
       s <- lapply(shape, function(s) {
@@ -58,7 +58,7 @@ mx.model.extract.model <- function(symbol, train.execs) {
   # Get the parameters
   ndevice <- length(train.execs)
   narg <- length(train.execs[[1]]$ref.arg.arrays)
-  arg.params <- lapply(1:narg, function(k) {
+  arg.params <- lapply(seq_len(narg), function(k) {
     if (is.null(train.execs[[1]]$ref.grad.arrays[[k]])) {
       result <- NULL
     } else {
@@ -73,7 +73,7 @@ mx.model.extract.model <- function(symbol, train.execs) {
   # Get the auxiliary
   naux <- length(train.execs[[1]]$ref.aux.arrays)
   if (naux != 0) {
-    aux.params <- lapply(1:naux, function(k) {
+    aux.params <- lapply(seq_len(naux), function(k) {
       reduce.sum(lapply(train.execs, function(texec) {
         mx.nd.copyto(texec$ref.aux.arrays[[k]], mx.cpu())
       })) / ndevice
@@ -95,13 +95,13 @@ mx.model.create.kvstore <- function(kvstore, arg.params, ndevice, verbose=TRUE)
   }
   if (ndevice == 1) return (NULL)
   if (kvstore == "local") {
-    max.size <- max(as.integer(lapply(arg.params, length)))
+    max.size <- max(lengths(arg.params))
     if (max.size < 1024 * 1024 * 16) {
       kvstore <- 'local_update_cpu'
     } else {
       kvstore <- 'local_allreduce_cpu'
     }
-    if(verbose) message(paste0("Auto-select kvstore type = ", kvstore))
+    if(verbose) message("Auto-select kvstore type = ", kvstore)
   }
   return(mx.kv.create(kvstore))
 }
@@ -114,7 +114,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
                            epoch.end.callback, batch.end.callback,
                            kvstore, fixed.param = NULL, verbose = TRUE) {
   ndevice <- length(ctx)
-  if(verbose) message(paste0("Start training with ", ndevice, " devices"))
+  if(verbose) message("Start training with ", ndevice, " devices")
   # create the executors
   input_slice <- mx.model.slice.shape(input.shape, ndevice)
   output_slice <- mx.model.slice.shape(output.shape, ndevice)
@@ -122,7 +122,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
   arg_names <- arguments(symbol)
   output.names <- names(output.shape)
   #label_name <- arg_names[endsWith(arg_names, "label")]
-  train.execs <- lapply(1:ndevice, function(i) {
+  train.execs <- lapply(seq_len(ndevice), function(i) {
     arg_lst <- list(symbol = symbol, ctx = ctx[[i]], grad.req = "write")
     arg_lst <- append(arg_lst, input_slice[[i]]$shape)
     arg_lst <- append(arg_lst, output_slice[[i]]$shape)
@@ -137,7 +137,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
   # KVStore related stuffs
   params.index <-
     as.integer(mx.util.filter.null(
-      lapply(1:length(train.execs[[1]]$ref.grad.arrays), function(k) {
+      lapply(seq_along(train.execs[[1]]$ref.grad.arrays), function(k) {
         if (!is.null(train.execs[[1]]$ref.grad.arrays[[k]])) k else NULL
       })))
   update.on.kvstore <- FALSE
@@ -145,7 +145,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
     update.on.kvstore <- TRUE
     kvstore$set.optimizer(optimizer)
   } else {
-    updaters <- lapply(1:ndevice, function(i) {
+    updaters <- lapply(seq_len(ndevice), function(i) {
       mx.opt.get.updater(optimizer, train.execs[[i]]$ref.arg.arrays)
     })
   }
@@ -162,13 +162,13 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
     while (train.data$iter.next()) {
       # Get input data slice
       dlist <- train.data$value()
-      slices <- lapply(1:ndevice, function(i) {
+      slices <- lapply(seq_len(ndevice), function(i) {
         s <- input_slice[[i]]
         ret <- sapply(names(dlist), function(n) {mx.nd.slice(dlist[[n]], s$begin, s$end)})
         return(ret)
       })
       # copy data to executor
-      for (i in 1:ndevice) {
+      for (i in seq_len(ndevice)) {
         s <- slices[[i]]
         if (endsWith(output.names, "label")) {
           names(s)[endsWith(names(s), "label")] = output.names 
@@ -205,16 +205,16 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
             texec$ref.grad.arrays[params.index]
           }), -params.index)
         }
-        arg.blocks <- lapply(1:ndevice, function(i) {
+        arg.blocks <- lapply(seq_len(ndevice), function(i) {
           updaters[[i]](train.execs[[i]]$ref.arg.arrays, train.execs[[i]]$ref.grad.arrays)
         })
-        for (i in 1:ndevice) {
+        for (i in seq_len(ndevice)) {
           mx.exec.update.arg.arrays(train.execs[[i]], arg.blocks[[i]], skip.null=TRUE)
         }
       }
       # Update the evaluation metrics
       if (!is.null(metric)) {
-        for (i in 1 : ndevice) {
+        for (i in seq_len(ndevice)) {
           train.metric <- metric$update(slices[[i]][[length(slices[[i]])]], out.preds[[i]], train.metric)
         }
       }
@@ -227,7 +227,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
     train.data$reset()
     if (!is.null(metric)) {
       result <- metric$get(train.metric)
-      if(verbose) message(paste0("[", iteration, "] Train-", result$name, "=", result$value))
+      if(verbose) message("[", iteration, "] Train-", result$name, "=", result$value)
     }
     if (!is.null(eval.data)) {
       if (!is.null(metric)) {
@@ -235,12 +235,12 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
       }
       while (eval.data$iter.next()) {
         dlist <- eval.data$value()
-        slices <- lapply(1:ndevice, function(i) {
+        slices <- lapply(seq_len(ndevice), function(i) {
           s <- input_slice[[i]]
           ret <- sapply(names(dlist), function(n) {mx.nd.slice(dlist[[n]], s$begin, s$end)})
           return(ret)
         })
-        for (i in 1:ndevice) {
+        for (i in seq_len(ndevice)) {
           s <- slices[[i]]
           if (endsWith(output.names, "label")) {
             names(s)[endsWith(names(s), "label")] = output.names 
@@ -254,7 +254,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
           mx.nd.copyto(texec$ref.outputs[[1]], mx.cpu())
         })
         if (!is.null(metric)) {
-          for (i in 1 : ndevice) {
+          for (i in seq_len(ndevice)) {
             eval.metric <- metric$update(slices[[i]][[length(slices[[i]])]] , out.preds[[i]], eval.metric)
           }
         }
@@ -262,7 +262,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
       eval.data$reset()
       if (!is.null(metric)) {
         result <- metric$get(eval.metric)
-        if(verbose) message(paste0("[", iteration, "] Validation-", result$name, "=", result$value))
+        if(verbose) message("[", iteration, "] Validation-", result$name, "=", result$value)
       }
     } else {
       eval.metric <- NULL
@@ -290,7 +290,7 @@ mx.model.train <- function(symbol, ctx, input.shape, output.shape,
 #' @param ctx mx.context. The devices used to perform initialization.
 #' @export
 mx.model.init.params <- function(symbol, input.shape, output.shape, initializer, ctx) {
-  if (!is.MXSymbol(symbol)) stop("symbol need to be MXSymbol")
+  if (!is.MXSymbol(symbol)) stop("symbol needs to be MXSymbol")
 
   arg_lst <- list(symbol = symbol)
   arg_lst <- append(arg_lst, input.shape)
@@ -310,7 +310,7 @@ mx.model.init.iter <- function(X, y, batch.size, is.train) {
     if (is.train) stop("Need to provide parameter y for training with R arrays.")
     shape <- dim(X)
     ndim <- length(shape)
-    y <- c(1:shape[[ndim]]) * 0
+    y <- rep.int(0, times = shape[[ndim]])
   }
   batch.size <- min(length(y), batch.size)
   return(mx.io.arrayiter(X, y, batch.size=batch.size, shuffle=is.train))
@@ -349,21 +349,16 @@ mx.model.select.layout.predict <- function(X, model) {
   ret <- mx.symbol.infer.shape(model$symbol, data=c(dimX[[2]], 1))
   if (!is.null(ret)) {
     names = names(model$arg.params)
-    for (i in 1:length(names)) {
-      if (any(ret$arg.shapes[[names[i]]] != dim(model$arg.params[[i]]))) {
-        rowmajor <- 0
-      }
-    }
+    if (any(vapply(seq_along(names),
+                   function(i) any(ret$arg.shapes[[names[i]]] != dim(model$arg.params[[i]])),
+                   logical(1)))) rowmajor <- 0
   }
   # try col major
   ret <- mx.symbol.infer.shape(model$symbol, data=c(dimX[[1]], 1))
   if (!is.null(ret)) {
-    names = names(model$arg.params)
-    for (i in 1:length(names)) {
-      if (any(ret$arg.shapes[[names[i]]] != dim(model$arg.params[[i]]))) {
-        colmajor <- 0
-      }
-    }
+    if (any(vapply(seq_along(names),
+                   function(i) any(ret$arg.shapes[[names[i]]] != dim(model$arg.params[[i]])),
+                   logical(1)))) colmajor <- 0
   }
   if (rowmajor + colmajor != 1) {
     stop("Cannot auto select array.layout, please specify this parameter")
@@ -589,27 +584,20 @@ predict.MXFeedForwardModel <- function(model, X, ctx = NULL, array.batch.size =
 mx.model.load <- function(prefix, iteration) {
   symbol <- mx.symbol.load(path.expand(paste0(prefix, "-symbol.json")))
   save.dict <- mx.nd.load(path.expand(sprintf("%s-%04d.params", prefix, iteration)))
-  names <- names(save.dict)
-  arg.index <- as.integer(mx.util.filter.null(lapply(1:length(names), function(i) {
-    if (startsWith(names[[i]], "arg:")) i else NULL
-  })))
-  aux.index <- as.integer(mx.util.filter.null(lapply(1:length(names), function(i) {
-    if (startsWith(names[[i]], "aux:")) i else NULL
-  })))
+  nms <- names(save.dict)
+  
+  arg.index <- startsWith(nms, "arg:")
+  aux.index <- startsWith(nms, "aux:")
 
-  if (length(arg.index) != 0) {
+  if (any(arg.index)) {
     arg.params <- save.dict[arg.index]
-    names(arg.params) <- as.character(lapply(names[arg.index], function(nm) {
-      substr(nm, 5, nchar(nm))
-    }))
+    names(arg.params) <- substr(nms[arg.index], 5, nchar(nms[arg.index]))
   } else {
     arg.params <- list()
   }
-  if (length(aux.index) != 0) {
+  if (any(aux.index)) {
     aux.params <- save.dict[aux.index]
-    names(aux.params) <- as.character(lapply(names[aux.index], function(nm) {
-      substr(nm, 5, nchar(nm))
-    }))
+    names(aux.params) <- substr(nms[aux.index], 5, nchar(nms[aux.index]))
   } else {
     aux.params <- list()
   }
diff --git a/R-package/R/model.rnn.R b/R-package/R/model.rnn.R
index 8f3ab8c..78a125e 100644
--- a/R-package/R/model.rnn.R
+++ b/R-package/R/model.rnn.R
@@ -7,18 +7,18 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
   
   ndevice <- length(ctx)
   if (verbose) 
-    message(paste0("Start training with ", ndevice, " devices"))
+    message("Start training with ", ndevice, " devices")
   
   input.names <- names(dlist)
   arg.params.names <- names(arg.params)
   
   if (is.list(symbol)) sym_ini <- symbol[[names(train.data$bucketID)]] else sym_ini <- symbol
   
-  slices <- lapply(1:ndevice, function(i) {
-    sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = F))
+  slices <- lapply(seq_len(ndevice), function(i) {
+    sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = FALSE))
   })
   
-  train.execs <- lapply(1:ndevice, function(i) {
+  train.execs <- lapply(seq_len(ndevice), function(i) {
     s <- slices[[i]]
     mx.symbol.bind(symbol = sym_ini, arg.arrays = c(s, arg.params)[arg.update.idx], 
                            aux.arrays = aux.params, ctx = ctx[[i]], grad.req = grad.req)
@@ -27,7 +27,7 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
   # KVStore related stuffs
   params.index <- as.integer(
     mx.util.filter.null(
-      lapply(1:length(train.execs[[1]]$ref.grad.arrays), function(k) {
+      lapply(seq_along(train.execs[[1]]$ref.grad.arrays), function(k) {
         if (!is.null(train.execs[[1]]$ref.grad.arrays[[k]])) k else NULL}
       )))
   
@@ -36,7 +36,7 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
     update.on.kvstore <- TRUE
     kvstore$set.optimizer(optimizer)
   } else {
-    updaters <- lapply(1:ndevice, function(i) {
+    updaters <- lapply(seq_len(ndevice), function(i) {
       mx.opt.get.updater(optimizer, train.execs[[i]]$ref.arg.arrays)
     })
   }
@@ -58,20 +58,20 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
       dlist <- train.data$value()[input.names]
       
       # Slice inputs for multi-devices
-      slices <- lapply(1:ndevice, function(i) {
+      slices <- lapply(seq_len(ndevice), function(i) {
         sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = F))
       })
       
       # Assign input to each executor - bug on inference if using BatchNorm
       if (is.list(symbol)) {
-        train.execs <- lapply(1:ndevice, function(i) {
+        train.execs <- lapply(seq_len(ndevice), function(i) {
           s <- slices[[i]]
           mx.symbol.bind(symbol = symbol[[names(train.data$bucketID)]], 
                                  arg.arrays = c(s, train.execs[[i]]$arg.arrays[arg.params.names])[arg.update.idx],
                                  aux.arrays = train.execs[[i]]$aux.arrays, ctx = ctx[[i]], grad.req = grad.req)
         })
       } else {
-        for (i in 1:ndevice) {
+        for (i in seq_len(ndevice)) {
           s <- slices[[i]]
           mx.exec.update.arg.arrays(train.execs[[i]], s, match.name=TRUE)
         }
@@ -107,17 +107,17 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
             texec$ref.grad.arrays[params.index]
           }), -params.index)
         }
-        arg.blocks <- lapply(1:ndevice, function(i) {
+        arg.blocks <- lapply(seq_len(ndevice), function(i) {
           updaters[[i]](train.execs[[i]]$ref.arg.arrays, train.execs[[i]]$ref.grad.arrays)
         })
-        for (i in 1:ndevice) {
+        for (i in seq_len(ndevice)) {
           mx.exec.update.arg.arrays(train.execs[[i]], arg.blocks[[i]], skip.null = TRUE)
         }
       }
       
       # Update the evaluation metrics
       if (!is.null(metric)) {
-        for (i in 1:ndevice) {
+        for (i in seq_len(ndevice)) {
           train.metric <- metric$update(label = slices[[i]][[length(slices[[i]])]], 
                                         pred = out.preds[[i]], state = train.metric)
         }
@@ -133,7 +133,7 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
     if (!is.null(metric)) {
       result <- metric$get(train.metric)
       if (verbose) 
-        message(paste0("[", iteration, "] Train-", result$name, "=", result$value))
+        message("[", iteration, "] Train-", result$name, "=", result$value)
     }
     
     if (!is.null(eval.data)) {
@@ -147,20 +147,20 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
         dlist <- eval.data$value()[input.names]
         
         # Slice input to multiple devices
-        slices <- lapply(1:ndevice, function(i) {
-          sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = F))
+        slices <- lapply(seq_len(ndevice), function(i) {
+          sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = FALSE))
         })
         
         # Assign input to each executor - bug on inference if using BatchNorm
         if (is.list(symbol)) {
-          train.execs <- lapply(1:ndevice, function(i) {
+          train.execs <- lapply(seq_len(ndevice), function(i) {
             s <- slices[[i]]
             mx.symbol.bind(symbol = symbol[[names(eval.data$bucketID)]], 
                                    arg.arrays = c(s, train.execs[[i]]$arg.arrays[arg.params.names])[arg.update.idx],
                                    aux.arrays = train.execs[[i]]$aux.arrays, ctx = ctx[[i]], grad.req = grad.req)
           })
         } else {
-          for (i in 1:ndevice) {
+          for (i in seq_len(ndevice)) {
             s <- slices[[i]]
             mx.exec.update.arg.arrays(train.execs[[i]], s, match.name=TRUE)
           }
@@ -176,7 +176,7 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
         })
         
         if (!is.null(metric)) {
-          for (i in 1:ndevice) {
+          for (i in seq_len(ndevice)) {
             eval.metric <- metric$update(slices[[i]][[length(slices[[i]])]], 
                                          out.preds[[i]], eval.metric)
           }
@@ -186,8 +186,8 @@ mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data,
       if (!is.null(metric)) {
         result <- metric$get(eval.metric)
         if (verbose) {
-          message(paste0("[", iteration, "] Validation-", result$name, "=", 
-                         result$value))
+          message("[", iteration, "] Validation-", result$name, "=", 
+                         result$value)
         }
       }
     } else {
@@ -266,7 +266,7 @@ mx.model.buckets <- function(symbol, train.data, eval.data = NULL, metric = NULL
     optimizer <- mx.opt.create(optimizer, rescale.grad = (1/batchsize), ...)
   }
   
-  if (is.list(symbol)) sym_ini <- symbol[[names(train.data$bucketID)]] else sym_ini <- symbol
+  sym_ini <- if (is.list(symbol)) symbol[[names(train.data$bucketID)]] else symbol
   
   arguments <- sym_ini$arguments
   input.names <- intersect(names(train.data$value()), arguments)
diff --git a/R-package/R/mx.io.bucket.iter.R b/R-package/R/mx.io.bucket.iter.R
index 8e5ab59..22ac1fa 100644
--- a/R-package/R/mx.io.bucket.iter.R
+++ b/R-package/R/mx.io.bucket.iter.R
@@ -20,7 +20,7 @@ BucketIter <- setRefClass("BucketIter", fields = c("buckets", "bucket.names", "b
                               .self
                             }, reset = function() {
                               buckets_nb <- length(bucket.names)
-                              buckets_id <- 1:buckets_nb
+                              buckets_id <- seq_len(buckets_nb)
                               buckets.size <- sapply(.self$buckets, function(x) {
                                 dim(x$data)[length(dim(x$data)) - 1]
                               })
@@ -36,7 +36,7 @@ BucketIter <- setRefClass("BucketIter", fields = c("buckets", "bucket.names", "b
                               
                               if (.self$shuffle) {
                                 set.seed(.self$seed)
-                                bucket_plan_names <- sample(rep(names(.self$batch.per.bucket), times = .self$batch.per.bucket))
+                                bucket_plan_names <- sample(rep.int(names(.self$batch.per.bucket), times = .self$batch.per.bucket))
                                 .self$bucket.plan <- ave(bucket_plan_names == bucket_plan_names, bucket_plan_names, 
                                                          FUN = cumsum)
                                 names(.self$bucket.plan) <- bucket_plan_names
@@ -44,7 +44,7 @@ BucketIter <- setRefClass("BucketIter", fields = c("buckets", "bucket.names", "b
                                 .self$bucketID <- .self$bucket.plan[1]
                                 
                                 .self$buckets <- lapply(.self$buckets, function(x) {
-                                  shuffle_id <- sample(dim(x$data)[length(dim(x$data)) - 1])
+                                  shuffle_id <- sample.int(dim(x$data)[length(dim(x$data)) - 1])
                                   if (length(dim(x$label)) == 0) {
                                     list(data = x$data[shuffle_id, ], label = x$label[shuffle_id])
                                   } else {
@@ -52,7 +52,7 @@ BucketIter <- setRefClass("BucketIter", fields = c("buckets", "bucket.names", "b
                                   }
                                 })
                               } else {
-                                bucket_plan_names <- rep(names(.self$batch.per.bucket), times = .self$batch.per.bucket)
+                                bucket_plan_names <- rep.int(names(.self$batch.per.bucket), times = .self$batch.per.bucket)
                                 .self$bucket.plan <- ave(bucket_plan_names == bucket_plan_names, bucket_plan_names, 
                                                          FUN = cumsum)
                                 names(.self$bucket.plan) <- bucket_plan_names
@@ -60,29 +60,25 @@ BucketIter <- setRefClass("BucketIter", fields = c("buckets", "bucket.names", "b
                             }, iter.next = function() {
                               .self$batch <- .self$batch + 1
                               .self$bucketID <- .self$bucket.plan[batch]
-                              if (.self$batch > .self$batch.per.epoch) {
-                                return(FALSE)
-                              } else {
-                                return(TRUE)
-                              }
+                              return(.self$batch < .self$batch.per.epoch)
                             }, value = function() {
                               # bucketID is a named integer: the integer indicates the batch id for the given
                               # bucket (used to fetch appropriate samples within the bucket) the name is the a
                               # character containing the sequence length of the bucket (used to unroll the rnn
                               # to appropriate sequence length)
-                              idx <- (.self$bucketID - 1) * (.self$batch.size) + (1:batch.size)
+                              idx <- (.self$bucketID - 1) * (.self$batch.size) + seq_len(batch.size)
                               
                               ### reuse first idx for padding
                               if (bucketID == .self$batch.per.bucket[names(.self$bucketID)] & !.self$last.batch.pad[names(.self$bucketID)] == 0) {
-                                idx <- c(idx[1:(.self$batch.size - .self$last.batch.pad[names(.self$bucketID)])], 1:(.self$last.batch.pad[names(.self$bucketID)]))
+                                idx <- c(idx[seq_len(.self$batch.size - .self$last.batch.pad[names(.self$bucketID)])], seq_len(.self$last.batch.pad[names(.self$bucketID)]))
                               }
                               
-                              data <- .self$buckets[[names(.self$bucketID)]]$data[idx, , drop = F]
+                              data <- .self$buckets[[names(.self$bucketID)]]$data[idx, , drop = FALSE]
                               seq.mask <- as.integer(names(bucketID)) - apply(data==.self$data.mask.element, 1, sum)
                               if (length(dim(.self$buckets[[names(.self$bucketID)]]$label)) == 0) {
                                 label <- .self$buckets[[names(.self$bucketID)]]$label[idx]
                               } else {
-                                label <- .self$buckets[[names(.self$bucketID)]]$label[idx, , drop = F]
+                                label <- .self$buckets[[names(.self$bucketID)]]$label[idx, , drop = FALSE]
                               }
                               return(list(data = mx.nd.array(data), seq.mask = mx.nd.array(seq.mask), 
                                           label = mx.nd.array(label)))
diff --git a/R-package/R/optimizer.R b/R-package/R/optimizer.R
index 52fc1f2..253f031 100644
--- a/R-package/R/optimizer.R
+++ b/R-package/R/optimizer.R
@@ -396,22 +396,13 @@ mx.opt.adadelta <- function(rho=0.90,
 #'
 #' @export
 mx.opt.create <- function(name, ...) {
-  if (name == "sgd") {
-    return(mx.opt.sgd(...))
-  }
-  else if (name == "rmsprop") {
-    return (mx.opt.rmsprop(...))
-  }
-  else if (name == "adam") {
-    return (mx.opt.adam(...))
-  }
-  else if (name == "adagrad") {
-    return (mx.opt.adagrad(...))
-  }
-  else if (name == "adadelta") {
-    return (mx.opt.adadelta(...))
-  }
-  stop(paste("Unknown optimizer ", name))
+  switch(name,
+         "sgd" = mx.opt.sgd(...),
+         "rmsprop" = mx.opt.rmsprop(...),
+         "adam" = mx.opt.adam(...),
+         "adagrad" = mx.opt.adagrad(...),
+         "adadelta" = mx.opt.adadelta(...),
+         stop("Unknown optimizer ", name))
 }
 
 #' Get an updater closure that can take list of weight and gradient
@@ -422,16 +413,15 @@ mx.opt.create <- function(name, ...) {
 #'
 #' @export
 mx.opt.get.updater <- function(optimizer, weights) {
-  n <- length(weights)
   # This is the list to keep track of internal states of optimzer
-  state.list <- lapply(1:n, function(i) {
+  state.list <- lapply(seq_along(weights), function(i) {
     if (is.null(weights[[i]])) return(NULL)
     optimizer$create.state(i, weights[[i]])
   })
   update <- optimizer$update
 
   update.closure <- function(weight, grad) {
-    ulist <- lapply(1:n, function(i) {
+    ulist <- lapply(seq_along(weights), function(i) {
       if (!is.null(grad[[i]])) {
         update(i, weight[[i]], grad[[i]], state.list[[i]])
       } else {
diff --git a/R-package/R/rnn.graph.R b/R-package/R/rnn.graph.R
index 2c099f0..5197882 100644
--- a/R-package/R/rnn.graph.R
+++ b/R-package/R/rnn.graph.R
@@ -21,8 +21,8 @@ rnn.graph <- function(num.rnn.layer,
                       loss_output = NULL, 
                       config,
                       cell.type,
-                      masking = F,
-                      output_last_state = F) {
+                      masking = FALSE,
+                      output_last_state = FALSE) {
   
   # define input arguments
   data <- mx.symbol.Variable("data")
@@ -48,17 +48,17 @@ rnn.graph <- function(num.rnn.layer,
   
   # RNN cells
   if (cell.type == "lstm") {
-    rnn <- mx.symbol.RNN(data=data, state=rnn.state, state_cell = rnn.state.cell, parameters=rnn.params.weight, state.size=num.hidden, num.layers=num.rnn.layer, bidirectional=F, mode=cell.type, state.outputs=output_last_state, p=dropout, name=paste(cell.type, num.rnn.layer, "layer", sep="_"))
+    rnn <- mx.symbol.RNN(data=data, state=rnn.state, state_cell = rnn.state.cell, parameters=rnn.params.weight, state.size=num.hidden, num.layers=num.rnn.layer, bidirectional=FALSE, mode=cell.type, state.outputs=output_last_state, p=dropout, name=paste(cell.type, num.rnn.layer, "layer", sep="_"))
     
   } else {
-    rnn <- mx.symbol.RNN(data=data, state=rnn.state, parameters=rnn.params.weight, state.size=num.hidden, num.layers=num.rnn.layer, bidirectional=F, mode=cell.type, state.outputs=output_last_state, p=dropout, name=paste(cell.type, num.rnn.layer, "layer", sep="_"))
+    rnn <- mx.symbol.RNN(data=data, state=rnn.state, parameters=rnn.params.weight, state.size=num.hidden, num.layers=num.rnn.layer, bidirectional=FALSE, mode=cell.type, state.outputs=output_last_state, p=dropout, name=paste(cell.type, num.rnn.layer, "layer", sep="_"))
   }
   
   # Decode
   if (config=="seq-to-one") {
     
-    if (masking) mask <- mx.symbol.SequenceLast(data=rnn[[1]], use.sequence.length = T, sequence_length = seq.mask, name = "mask") else
-      mask <- mx.symbol.SequenceLast(data=rnn[[1]], use.sequence.length = F, name = "mask")
+    if (masking) mask <- mx.symbol.SequenceLast(data=rnn[[1]], use.sequence.length = TRUE, sequence_length = seq.mask, name = "mask") else
+      mask <- mx.symbol.SequenceLast(data=rnn[[1]], use.sequence.length = FALSE, name = "mask")
     
     decode <- mx.symbol.FullyConnected(data=mask,
                                        weight=cls.weight,
@@ -77,7 +77,7 @@ rnn.graph <- function(num.rnn.layer,
     
   } else if (config=="one-to-one"){
     
-    if (masking) mask <- mx.symbol.SequenceMask(data = rnn[[1]], use.sequence.length = T, sequence_length = seq.mask, value = 0, name = "mask") else
+    if (masking) mask <- mx.symbol.SequenceMask(data = rnn[[1]], use.sequence.length = TRUE, sequence_length = seq.mask, value = 0, name = "mask") else
       mask <- mx.symbol.identity(data = rnn[[1]], name = "mask")
     
     mask = mx.symbol.reshape(mask, shape=c(num.hidden, -1))
@@ -120,7 +120,7 @@ lstm.cell <- function(num.hidden, indata, prev.state, param, seqidx, layeridx, d
     gates <- i2h
   }
   
-  split.gates <- mx.symbol.split(gates, num.outputs = 4, axis = 1, squeeze.axis = F, 
+  split.gates <- mx.symbol.split(gates, num.outputs = 4, axis = 1, squeeze.axis = FALSE, 
                                  name = paste0("t", seqidx, ".l", layeridx, ".slice"))
   
   in.gate <- mx.symbol.Activation(split.gates[[1]], act.type = "sigmoid")
@@ -157,7 +157,7 @@ gru.cell <- function(num.hidden, indata, prev.state, param, seqidx, layeridx, dr
     gates <- i2h
   }
   
-  split.gates <- mx.symbol.split(gates, num.outputs = 2, axis = 1, squeeze.axis = F, 
+  split.gates <- mx.symbol.split(gates, num.outputs = 2, axis = 1, squeeze.axis = FALSE, 
                                  name = paste0("t", seqidx, ".l", layeridx, ".split"))
   
   update.gate <- mx.symbol.Activation(split.gates[[1]], act.type = "sigmoid")
@@ -166,15 +166,11 @@ gru.cell <- function(num.hidden, indata, prev.state, param, seqidx, layeridx, dr
   htrans.i2h <- mx.symbol.FullyConnected(data = indata, weight = param$trans.i2h.weight, 
                                          bias = param$trans.i2h.bias, num.hidden = num.hidden, 
                                          name = paste0("t", seqidx, ".l", layeridx, ".trans.i2h"))
-  
-  if (is.null(prev.state)) {
-    h.after.reset <- reset.gate * 0
-  } else {
-    h.after.reset <- prev.state$h * reset.gate
-  }
-  
-  htrans.h2h <- mx.symbol.FullyConnected(data = h.after.reset, weight = param$trans.h2h.weight, 
-                                         bias = param$trans.h2h.bias, num.hidden = num.hidden, 
+
+  h.after.reset <- reset.gate * (if (is.null(prev.state)) 0 else prev.state$h)
+
+  htrans.h2h <- mx.symbol.FullyConnected(data = h.after.reset, weight = param$trans.h2h.weight,
+                                         bias = param$trans.h2h.bias, num.hidden = num.hidden,
                                          name = paste0("t", seqidx, ".l", layeridx, ".trans.h2h"))
   
   h.trans <- htrans.i2h + htrans.h2h
@@ -205,8 +201,8 @@ rnn.graph.unroll <- function(num.rnn.layer,
                              init.state = NULL,
                              config,
                              cell.type = "lstm", 
-                             masking = F, 
-                             output_last_state = F) {
+                             masking = FALSE, 
+                             output_last_state = FALSE) {
   
   
   if (!is.null(num.embed)) embed.weight <- mx.symbol.Variable("embed.weight")
@@ -214,7 +210,7 @@ rnn.graph.unroll <- function(num.rnn.layer,
   cls.weight <- mx.symbol.Variable("cls.weight")
   cls.bias <- mx.symbol.Variable("cls.bias")
   
-  param.cells <- lapply(1:num.rnn.layer, function(i) {
+  param.cells <- lapply(seq_len(num.rnn.layer), function(i) {
     
     if (cell.type=="lstm"){
       cell <- list(i2h.weight = mx.symbol.Variable(paste0("l", i, ".i2h.weight")),
@@ -244,15 +240,15 @@ rnn.graph.unroll <- function(num.rnn.layer,
                                 weight=embed.weight, output_dim = num.embed, name = "embed")
   }
   
-  data <- mx.symbol.split(data = data, axis = 0, num.outputs = seq.len, squeeze_axis = T)
+  data <- mx.symbol.split(data = data, axis = 0, num.outputs = seq.len, squeeze_axis = TRUE)
   
   last.hidden <- list()
   last.states <- list()
   
-  for (seqidx in 1:seq.len) {
+  for (seqidx in seq_len(seq.len)) {
     hidden <- data[[seqidx]]
     
-    for (i in 1:num.rnn.layer) {
+    for (i in seq_len(num.rnn.layer)) {
       
       if (seqidx==1) prev.state<- init.state[[i]] else prev.state <- last.states[[i]]
       
diff --git a/R-package/R/rnn.infer.R b/R-package/R/rnn.infer.R
index c9ccecb..a22bae0 100644
--- a/R-package/R/rnn.infer.R
+++ b/R-package/R/rnn.infer.R
@@ -52,7 +52,7 @@ mx.infer.buckets <- function(infer.data, model, ctx = mx.cpu()) {
   arg.params.fix <- arguments.ini[arg.params.fix.names]
   
   # Grad request
-  grad.req <- rep("null", length(arguments))
+  grad.req <- rep.int("null", length(arguments))
   
   # Arg array order
   update_names <- c(input.names, arg.params.fix.names, arg.params.names)
@@ -138,7 +138,7 @@ mx.infer.buckets.one <- function(infer.data,
   aux.params <- aux.params
   
   # Grad request
-  grad.req <- rep("null", length(arguments))
+  grad.req <- rep.int("null", length(arguments))
   
   # Arg array order
   update_names <- c(input.names, arg.params.fix.names, arg.params.names)
diff --git a/R-package/R/util.R b/R-package/R/util.R
index acc9510..8eddb5d 100644
--- a/R-package/R/util.R
+++ b/R-package/R/util.R
@@ -1,6 +1,6 @@
 # filter out null, keep the names
 mx.util.filter.null <- function(lst) {
-  lst[!sapply(lst, is.null)]
+  Filter(Negate(is.null), lst)
 }
 
 #' Internal function to generate mxnet_generated.R
diff --git a/R-package/R/viz.graph.R b/R-package/R/viz.graph.R
index 6d13de0..49f978a 100644
--- a/R-package/R/viz.graph.R
+++ b/R-package/R/viz.graph.R
@@ -65,12 +65,14 @@ graph.viz <- function(symbol, shape=NULL, direction="TD", type="graph", graph.wi
   
   model_list<- fromJSON(symbol$as.json())
   model_nodes<- model_list$nodes
-  model_nodes$id<- 1:nrow(model_nodes)-1
+  model_nodes$id<- seq_len(nrow(model_nodes))-1
   model_nodes$level<- model_nodes$ID
   
   # extract IDs from string list
-  tuple_str <- function(str) sapply(str_extract_all(str, "\\d+"), function(x) paste0(x, collapse="X"))
-  
+  tuple_str <- function(str) vapply(str_extract_all(str, "\\d+"),
+                                    function(x) paste0(x, collapse="X"),
+                                    character(1))
+
   ### substitute op for heads
   op_id<- sort(unique(model_list$heads[1,]+1))
   op_null<- which(model_nodes$op=="null")
@@ -104,23 +106,23 @@ graph.viz <- function(symbol, shape=NULL, direction="TD", type="graph", graph.wi
   
   ### remapping for DiagrammeR convention
   nodes_df$id<- nodes_df$id
-  nodes_df$id_graph<- 1:nrow(nodes_df)
+  nodes_df$id_graph<- seq_len(nrow(nodes_df))
   id_dic<- nodes_df$id_graph
   names(id_dic)<- as.character(nodes_df$id)
   
-  edges_id<- model_nodes$id[!sapply(model_nodes$inputs, length)==0 & !model_nodes$op=="null"]
+  edges_id<- model_nodes$id[lengths(model_nodes$inputs)!=0 & model_nodes$op!="null"]
   edges_id<- id_dic[as.character(edges_id)]
-  edges<- model_nodes$inputs[!sapply(model_nodes$inputs, length)==0 & !model_nodes$op=="null"]
-  edges<- sapply(edges, function(x)intersect(as.numeric(x[, 1]), id.to.keep), simplify = F)
+  edges<- model_nodes$inputs[lengths(model_nodes$inputs)!=0 & model_nodes$op!="null"]
+  edges<- sapply(edges, function(x)intersect(as.numeric(x[, 1]), id.to.keep), simplify = FALSE)
   names(edges)<- edges_id
   
   edges_df<- data.frame(
     from=unlist(edges),
-    to=rep(names(edges), time=sapply(edges, length)),
+    to=rep(names(edges), time=lengths(edges)),
     arrows = "to",
     color="black",
     from_name_output=paste0(model_nodes$name[unlist(edges)+1], "_output"), 
-    stringsAsFactors=F)
+    stringsAsFactors=FALSE)
   edges_df$from<- id_dic[as.character(edges_df$from)]
   
   nodes_df_new<- create_node_df(n = nrow(nodes_df), label=nodes_df$label, shape=nodes_df$shape, type="base", penwidth=2, color=nodes_df$color, style="filled", 
@@ -133,14 +135,14 @@ graph.viz <- function(symbol, shape=NULL, direction="TD", type="graph", graph.wi
     } else edges_labels_raw<- symbol$get.internals()$infer.shape(list(data=shape))$out.shapes
     if (!is.null(edges_labels_raw)){
       edge_label_str <- function(x) paste0(x, collapse="X")
-      edges_labels_raw<- sapply(edges_labels_raw, edge_label_str)
+      edges_labels_raw<- vapply(edges_labels_raw, edge_label_str, character(1))
       names(edges_labels_raw)[names(edges_labels_raw)=="data"]<- "data_output"
       edge_df_new$label<- edges_labels_raw[edges_df$from_name_output]
       edge_df_new$rel<- edge_df_new$label
     }
   }
   
-  graph<- create_graph(nodes_df = nodes_df_new, edges_df = edge_df_new, directed = T) %>% 
+  graph<- create_graph(nodes_df = nodes_df_new, edges_df = edge_df_new, directed = TRUE) %>% 
     set_global_graph_attrs("layout", value = "dot", attr_type = "graph") %>% 
     add_global_graph_attrs("rankdir", value = direction, attr_type = "graph")
   

-- 
To stop receiving notification emails like this one, please contact
"commits@mxnet.apache.org" <commits@mxnet.apache.org>.