Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/06/27 22:30:22 UTC

[GitHub] jeremiedb commented on a change in pull request #11374: [MXNET-563] Refactor R optimizers to fix memory leak

URL: https://github.com/apache/incubator-mxnet/pull/11374#discussion_r198660820
 
 

 ##########
 File path: R-package/R/optimizer.R
 ##########
 @@ -40,105 +78,116 @@ mx.opt.sgd <- function(learning.rate,
         sgd$num_update <- max(sgd$num_update, sgd[[indexKey]])
       }
     }
-    grad <- grad * rescale.grad
-    if (!is.null(clip_gradient)){
-      if(clip_gradient >= 0){
-        grad <- mx.nd.clip(grad, -clip_gradient, clip_gradient)
-      } else {
-        stop("Error: clip_gradient should be positive number.")
-      }
-    }
-    if (is.null(state)) {
-      weight <- weight - lr * (grad + wd * weight)
-    } else {
-      mom <- state
-      mom <- mom * momentum
-      mom <- mom - lr * (grad + wd * weight)
-      weight <- weight + mom
-      state <- mom
-    }
-    return(list(weight=weight, state=state))
+    
+    mx.exec.update.arg.arrays(exec_w, arg.arrays = list(weight = weight, grad = grad), match.name = T)
+    mx.exec.forward(exec_w, is.train = F)
+    return(exec_w$ref.outputs$w_output)
   }
-  return(list(create.state=create.state, update=update))
+  return(list(create_exec = create_exec, update = update))
 }
 
 #' Create an RMSProp optimizer with respective parameters.
 #' Reference: Tieleman T, Hinton G. Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude[J]. COURSERA: Neural Networks for Machine Learning, 2012, 4(2).
 #' The code follows: http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.
-#' 
-#' @param learning.rate float, default=0.002
-#'      Step size.
-#' @param gamma1 float, default=0.95
+#'
+#' @param learning.rate float, default=1e-3
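
For context on the refactor above, here is a minimal, self-contained sketch
of the executor-based update pattern the new code relies on. It is
illustrative rather than the PR's exact code: the symbol name "w" (which
produces the "w_output" key read above), the shapes, and the hyperparameter
values are assumptions. The point is that the update graph is built and
bound once, and each step only copies weight/grad into the pre-bound
executor and runs a forward pass, instead of allocating fresh NDArrays on
every update.

    # Illustrative sketch; names, shapes, and hyperparameters are
    # assumptions, not the PR's exact code.
    library(mxnet)

    lr <- 0.01; wd <- 1e-4; shape <- c(10, 10)

    # Build the SGD update graph once from symbolic inputs.
    weight <- mx.symbol.Variable("weight")
    grad   <- mx.symbol.Variable("grad")
    w_sym  <- mx.symbol.BlockGrad(weight - lr * (grad + wd * weight), name = "w")

    # Bind once; grad.req = "null" since only the forward pass is needed.
    exec_w <- mx.simple.bind(w_sym, ctx = mx.cpu(), grad.req = "null",
                             weight = shape, grad = shape)

    # Per-step update: feed the current arrays into the pre-bound executor.
    w <- mx.nd.ones(shape)
    g <- mx.nd.ones(shape) * 0.1
    mx.exec.update.arg.arrays(exec_w, list(weight = w, grad = g), match.name = TRUE)
    mx.exec.forward(exec_w, is.train = FALSE)
    w_updated <- exec_w$ref.outputs$w_output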
 
 Review comment:
   I wanted to align with the Python package's default. I'll revert to the existing default if you see more harm from this change.
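
For reference, the centered RMSProp update described by the docs quoted in
the diff (Graves 2013, Eq. 38-45) can be sketched with NDArray ops as below.
The gamma1/gamma2/epsilon names follow the R package's parameters; the state
layout and the default values shown are assumptions for illustration, not
the package's exact implementation.

    # Sketch of one centered-RMSProp step; state layout and defaults are
    # assumptions. n and g track running averages of the gradient's square
    # and mean; delta carries momentum on the scaled step.
    library(mxnet)

    rmsprop.update <- function(weight, grad, state, lr = 1e-3,
                               gamma1 = 0.95, gamma2 = 0.9, epsilon = 1e-8) {
      n     <- gamma1 * state$n + (1 - gamma1) * grad * grad
      g     <- gamma1 * state$g + (1 - gamma1) * grad
      delta <- gamma2 * state$delta - lr * grad / mx.nd.sqrt(n - g * g + epsilon)
      list(weight = weight + delta,
           state  = list(n = n, g = g, delta = delta))
    }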

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services