Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/11/12 07:43:22 UTC

[GitHub] nswamy closed pull request #13170: Fix Sphinx doc errors

nswamy closed pull request #13170: Fix Sphinx doc errors
URL: https://github.com/apache/incubator-mxnet/pull/13170
 
 
   

This is a PR merged from a forked repository. Because GitHub hides the
original diff once a pull request from a fork is merged, the diff is
reproduced below for the sake of provenance:

diff --git a/docs/api/python/image/image.md b/docs/api/python/image/image.md
index 11fff4f4340..00c4a713bb5 100644
--- a/docs/api/python/image/image.md
+++ b/docs/api/python/image/image.md
@@ -56,7 +56,7 @@ Iterators support loading image from binary `Record IO` and raw image files.
 
 We use helper function to initialize augmenters
 ```eval_rst
-    .. currentmodule:: mxnet
+.. currentmodule:: mxnet
 .. autosummary::
     :nosignatures:
 
diff --git a/docs/api/python/module/module.md b/docs/api/python/module/module.md
index 5a874ac6df0..7a86eccd8d3 100644
--- a/docs/api/python/module/module.md
+++ b/docs/api/python/module/module.md
@@ -176,7 +176,7 @@ additional functionality. We summarize them in this section.
 .. autosummary::
     :nosignatures:
 
-    BucketModule.switch_bucket
+    BucketingModule.switch_bucket
 ```
 
 ### Class `SequentialModule`
diff --git a/docs/api/python/symbol/symbol.md b/docs/api/python/symbol/symbol.md
index 7c78cbd59b0..583f174cea2 100644
--- a/docs/api/python/symbol/symbol.md
+++ b/docs/api/python/symbol/symbol.md
@@ -297,8 +297,8 @@ Composite multiple symbols into a new one by an operator.
     Symbol.take
     Symbol.one_hot
     Symbol.pick
-    Symbol.ravel_multi_index
-    Symbol.unravel_index
+    ravel_multi_index
+    unravel_index
 ```
 
 ### Get internal and output symbol
@@ -577,7 +577,7 @@ Composite multiple symbols into a new one by an operator.
     broadcast_logical_and
     broadcast_logical_or
     broadcast_logical_xor
-    broadcast_logical_not
+    logical_not
 ```
 
 ### Random sampling
diff --git a/python/mxnet/contrib/svrg_optimization/svrg_module.py b/python/mxnet/contrib/svrg_optimization/svrg_module.py
index 5d6b5dd5720..47d0e57b45e 100644
--- a/python/mxnet/contrib/svrg_optimization/svrg_module.py
+++ b/python/mxnet/contrib/svrg_optimization/svrg_module.py
@@ -401,6 +401,7 @@ def fit(self, train_data, eval_data=None, eval_metric='acc',
             force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
             validation_metric=None, monitor=None, sparse_row_id_fn=None):
         """Trains the module parameters.
+
         Parameters
         ----------
         train_data : DataIter
diff --git a/python/mxnet/rnn/rnn.py b/python/mxnet/rnn/rnn.py
index 47307c55b04..0255c55dbef 100644
--- a/python/mxnet/rnn/rnn.py
+++ b/python/mxnet/rnn/rnn.py
@@ -35,7 +35,7 @@ def save_rnn_checkpoint(cells, prefix, epoch, symbol, arg_params, aux_params):
 
     Parameters
     ----------
-    cells : RNNCell or list of RNNCells
+    cells : mxnet.rnn.RNNCell or list of RNNCells
         The RNN cells used by this symbol.
     prefix : str
         Prefix of model name.
@@ -65,7 +65,7 @@ def load_rnn_checkpoint(cells, prefix, epoch):
 
     Parameters
     ----------
-    cells : RNNCell or list of RNNCells
+    cells : mxnet.rnn.RNNCell or list of RNNCells
         The RNN cells used by this symbol.
     prefix : str
         Prefix of model name.
@@ -100,7 +100,7 @@ def do_rnn_checkpoint(cells, prefix, period=1):
 
     Parameters
     ----------
-    cells : RNNCell or list of RNNCells
+    cells : mxnet.rnn.RNNCell or list of RNNCells
         The RNN cells used by this symbol.
     prefix : str
         The file prefix to checkpoint to
diff --git a/python/mxnet/rnn/rnn_cell.py b/python/mxnet/rnn/rnn_cell.py
index 3301102ba90..37200da2a91 100644
--- a/python/mxnet/rnn/rnn_cell.py
+++ b/python/mxnet/rnn/rnn_cell.py
@@ -716,7 +716,7 @@ def unfuse(self):
 
         Returns
         -------
-        cell : SequentialRNNCell
+        cell : mxnet.rnn.SequentialRNNCell
             unfused cell that can be used for stepping, and can run on CPU.
         """
         stack = SequentialRNNCell()
diff --git a/python/mxnet/symbol/symbol.py b/python/mxnet/symbol/symbol.py
index c6575072cc7..530d72796c0 100644
--- a/python/mxnet/symbol/symbol.py
+++ b/python/mxnet/symbol/symbol.py
@@ -1347,7 +1347,7 @@ def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None,
         shared_buffer : Dict of string to `NDArray`
             The dict mapping argument names to the `NDArray` that can be reused for initializing
             the current executor. This buffer will be checked for reuse if one argument name
-            of the current executor is not found in `shared_arg_names`. The `NDArray`s are
+            of the current executor is not found in `shared_arg_names`. The `NDArray` s are
             expected have default storage type.
 
         kwargs : Dict of str->shape
diff --git a/python/mxnet/symbol_doc.py b/python/mxnet/symbol_doc.py
index 3cb1997584d..e59437a3ccb 100644
--- a/python/mxnet/symbol_doc.py
+++ b/python/mxnet/symbol_doc.py
@@ -44,9 +44,6 @@
 - *Examples*: simple and short code snippet showing how to use this operator.
   It should show typical calling examples and behaviors (e.g. maps an input
   of what shape to an output of what shape).
-- *Regression Test*: longer test code for the operators. We normally do not
-  expect the users to read those, but they will be executed by `doctest` to
-  ensure the behavior of each operator does not change unintentionally.
 """
 from __future__ import absolute_import as _abs
 import re as _re
@@ -75,8 +72,6 @@ class ActivationDoc(SymbolDoc):
     >>> mlp
     <Symbol mlp>
 
-    Regression Test
-    ---------------
     ReLU activation
 
     >>> test_suites = [
@@ -107,8 +102,6 @@ class DropoutDoc(SymbolDoc):
     >>> data = Variable('data')
     >>> data_dp = Dropout(data=data, p=0.2)
 
-    Regression Test
-    ---------------
     >>> shape = (100, 100)  # take larger shapes to be more statistical stable
     >>> x = np.ones(shape)
     >>> op = Dropout(p=0.5, name='dp')
@@ -141,8 +134,6 @@ class EmbeddingDoc(SymbolDoc):
     >>> SymbolDoc.get_output_shape(op, letters=(seq_len, batch_size))
     {'embed_output': (10L, 64L, 16L)}
 
-    Regression Test
-    ---------------
     >>> vocab_size, embed_dim = (26, 16)
     >>> batch_size = 12
     >>> word_vecs = test_utils.random_arrays((vocab_size, embed_dim))
@@ -167,8 +158,6 @@ class FlattenDoc(SymbolDoc):
     >>> SymbolDoc.get_output_shape(flatten, data=(2, 3, 4, 5))
     {'flat_output': (2L, 60L)}
 
-    Regression Test
-    ---------------
     >>> test_dims = [(2, 3, 4, 5), (2, 3), (2,)]
     >>> op = Flatten(name='flat')
     >>> for dims in test_dims:
@@ -208,8 +197,6 @@ class FullyConnectedDoc(SymbolDoc):
     >>> net
     <Symbol pred>
 
-    Regression Test
-    ---------------
     >>> dim_in, dim_out = (3, 4)
     >>> x, w, b = test_utils.random_arrays((10, dim_in), (dim_out, dim_in), (dim_out,))
     >>> op = FullyConnected(num_hidden=dim_out, name='FC')
diff --git a/src/operator/contrib/adaptive_avg_pooling.cc b/src/operator/contrib/adaptive_avg_pooling.cc
index a65f5fe8d43..91ea6d8bee8 100644
--- a/src/operator/contrib/adaptive_avg_pooling.cc
+++ b/src/operator/contrib/adaptive_avg_pooling.cc
@@ -206,10 +206,10 @@ Applies a 2D adaptive average pooling over a 4D input with the shape of (NCHW).
 The pooling kernel and stride sizes are automatically chosen for desired output sizes.
 
 - If a single integer is provided for output_size, the output size is
-(N x C x output_size x output_size) for any input (NCHW).
+  (N x C x output_size x output_size) for any input (NCHW).
 
 - If a tuple of integers (height, width) are provided for output_size, the output size is
-(N x C x height x width) for any input (NCHW).
+  (N x C x height x width) for any input (NCHW).
 
 )code" ADD_FILELINE)
 .set_attr_parser(ParamParser<AdaptiveAvgPoolParam>)
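
Note: the adaptive_avg_pooling.cc hunk above only re-indents the two bullets
so that Sphinx renders them as list items; the described behavior of the
operator is unchanged. A minimal sketch of that behavior (not part of the PR),
assuming the contrib operator is exposed as mx.nd.contrib.AdaptiveAvgPooling2D
in this MXNet build:

    import mxnet as mx

    x = mx.nd.random.uniform(shape=(1, 3, 32, 32))   # input in NCHW layout

    # A single integer output_size gives (N x C x output_size x output_size).
    y1 = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=4)
    print(y1.shape)                                   # expected: (1, 3, 4, 4)

    # A (height, width) tuple gives (N x C x height x width).
    y2 = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(4, 8))
    print(y2.shape)                                   # expected: (1, 3, 4, 8)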
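
Note: the symbol.md hunk earlier in the diff drops the Symbol. prefix from
ravel_multi_index and unravel_index, presumably because they are module-level
operators rather than Symbol methods, so the prefixed autosummary entries do
not resolve. A minimal sketch of the two operators (not part of the PR), shown
here with the NDArray API rather than the Symbol API:

    import mxnet as mx

    # One column per element: first row holds row indices, second row column indices.
    indices = mx.nd.array([[3, 6, 6], [4, 5, 1]])

    flat = mx.nd.ravel_multi_index(indices, shape=(7, 6))
    print(flat.asnumpy())        # expected: [22. 41. 37.]

    restored = mx.nd.unravel_index(flat, shape=(7, 6))
    print(restored.asnumpy())    # expected: the original indices [[3. 6. 6.] [4. 5. 1.]]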


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services