Posted to commits@mxnet.apache.org by ha...@apache.org on 2019/02/15 00:08:55 UTC
[incubator-mxnet] branch master updated: Fix quote on LBSGD docs (#13975)
This is an automated email from the ASF dual-hosted git repository.
haibin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 0e08891 Fix quote on LBSGD docs (#13975)
0e08891 is described below
commit 0e08891f855af524926070a54e362b4cdfced714
Author: Thomas Delteil <th...@gmail.com>
AuthorDate: Thu Feb 14 16:08:37 2019 -0800
Fix quote on LBSGD docs (#13975)
* change docs lbsgd
* fix more
---
python/mxnet/optimizer/optimizer.py | 44 +++++++++++++++++--------------------
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/python/mxnet/optimizer/optimizer.py b/python/mxnet/optimizer/optimizer.py
index cb52ac5..a986f27 100644
--- a/python/mxnet/optimizer/optimizer.py
+++ b/python/mxnet/optimizer/optimizer.py
@@ -75,12 +75,11 @@ class Optimizer(object):
The initial number of updates.
multi_precision : bool, optional, default False
- Flag to control the internal precision of the optimizer.::
-
- False: results in using the same precision as the weights (default),
- True: makes internal 32-bit copy of the weights and applies gradients
- in 32-bit precision even if actual weights used in the model have lower precision.
- Turning this on can improve convergence and accuracy when training with float16.
+ Flag to control the internal precision of the optimizer.
+ False: results in using the same precision as the weights (default),
+ True: makes internal 32-bit copy of the weights and applies gradients
+ in 32-bit precision even if actual weights used in the model have lower precision.
+ Turning this on can improve convergence and accuracy when training with float16.
param_dict : dict of int -> gluon.Parameter, default None
Dictionary of parameter index to gluon.Parameter, used to lookup parameter attributes
@@ -541,12 +540,11 @@ class SGD(Optimizer):
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
multi_precision: bool, optional
- Flag to control the internal precision of the optimizer.::
-
- False: results in using the same precision as the weights (default),
- True: makes internal 32-bit copy of the weights and applies gradients
- in 32-bit precision even if actual weights used in the model have lower precision.
- Turning this on can improve convergence and accuracy when training with float16.
+ Flag to control the internal precision of the optimizer.
+ False: results in using the same precision as the weights (default),
+ True: makes internal 32-bit copy of the weights and applies gradients
+ in 32-bit precision even if actual weights used in the model have lower precision.
+ Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, lazy_update=True, **kwargs):
super(SGD, self).__init__(**kwargs)
@@ -790,12 +788,11 @@ class LBSGD(Optimizer):
momentum : float, optional
The momentum value.
multi_precision: bool, optional
- Flag to control the internal precision of the optimizer.::
-
- False: results in using the same precision as the weights (default),
- True: makes internal 32-bit copy of the weights and applies gradients
- in 32-bit precision even if actual weights used in the model have lower precision.
- Turning this on can improve convergence and accuracy when training with float16.
+ Flag to control the internal precision of the optimizer.
+ False: results in using the same precision as the weights (default),
+ True: makes internal 32-bit copy of the weights and applies gradients
+ in 32-bit precision even if actual weights used in the model have lower precision.
+ Turning this on can improve convergence and accuracy when training with float16.
warmup_strategy: string ('linear', 'power2', 'sqrt'. , 'lars' default : 'linear')
warmup_epochs: unsigned, default: 5
@@ -1031,12 +1028,11 @@ class NAG(Optimizer):
momentum : float, optional
The momentum value.
multi_precision: bool, optional
- Flag to control the internal precision of the optimizer.::
-
- False: results in using the same precision as the weights (default),
- True: makes internal 32-bit copy of the weights and applies gradients
- in 32-bit precision even if actual weights used in the model have lower precision.
- Turning this on can improve convergence and accuracy when training with float16.
+ Flag to control the internal precision of the optimizer.
+ False: results in using the same precision as the weights (default),
+ True: makes internal 32-bit copy of the weights and applies gradients
+ in 32-bit precision even if actual weights used in the model have lower precision.
+ Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, **kwargs):
super(NAG, self).__init__(**kwargs)
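
For context, the multi_precision flag whose docstring is reformatted above is typically enabled when training in float16. Below is a minimal sketch of how it might be used; it assumes the mxnet.optimizer.SGD constructor documented above and a Gluon float16 workflow, and names such as net and x are purely illustrative, not part of this commit.

# Minimal sketch: enabling multi_precision for float16 training.
# Assumes the SGD optimizer API described in the docstring above;
# the model and data below are illustrative placeholders.
import mxnet as mx
from mxnet import gluon, nd

net = gluon.nn.Dense(10)          # illustrative model
net.cast('float16')               # weights stored in float16
net.initialize(mx.init.Xavier())

# multi_precision=True keeps a 32-bit master copy of the weights and
# applies gradients in 32-bit precision, as the docstring describes.
trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'learning_rate': 0.1, 'momentum': 0.9,
                         'multi_precision': True})

x = nd.random.uniform(shape=(4, 20)).astype('float16')
with mx.autograd.record():
    loss = net(x).sum()
loss.backward()
trainer.step(batch_size=4)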