Posted to commits@spark.apache.org by me...@apache.org on 2014/12/18 02:31:30 UTC

spark git commit: [SPARK-4822] Use sphinx tags for Python doc annotations

Repository: spark
Updated Branches:
  refs/heads/master ca1260891 -> 3cd516191


[SPARK-4822] Use sphinx tags for Python doc annotations

Modify Python annotations for Sphinx. There is no change to the build process described in
https://github.com/apache/spark/blob/master/docs/README.md
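
For illustration, a minimal before/after docstring sketch (the class name
HypotheticalModel is made up for this example and is not part of the commit):

    # Before: a plain-text marker, which Sphinx renders literally.
    class HypotheticalModel(object):
        """
        :: Experimental ::

        Describe the model here.
        """

    # After: a reST directive, which Sphinx renders as a highlighted note.
    class HypotheticalModel(object):
        """
        .. note:: Experimental

        Describe the model here.
        """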

Author: lewuathe <le...@me.com>

Closes #3685 from Lewuathe/sphinx-tag-for-pydoc and squashes the following commits:

88a0fd9 [lewuathe] [SPARK-4822] Fix DevelopApi and WARN tags
3d7a398 [lewuathe] [SPARK-4822] Use sphinx tags for Python doc annotations


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/3cd51619
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/3cd51619
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/3cd51619

Branch: refs/heads/master
Commit: 3cd516191baadf8496ccdae499771020e89acd7e
Parents: ca12608
Author: lewuathe <le...@me.com>
Authored: Wed Dec 17 17:31:24 2014 -0800
Committer: Xiangrui Meng <me...@databricks.com>
Committed: Wed Dec 17 17:31:24 2014 -0800

----------------------------------------------------------------------
 python/pyspark/context.py              |  4 ++--
 python/pyspark/mllib/classification.py |  4 ++--
 python/pyspark/mllib/feature.py        | 12 ++++++------
 python/pyspark/mllib/stat.py           |  4 ++--
 python/pyspark/rdd.py                  |  8 ++++----
 python/pyspark/sql.py                  |  2 +-
 6 files changed, 17 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/3cd51619/python/pyspark/context.py
----------------------------------------------------------------------
diff --git a/python/pyspark/context.py b/python/pyspark/context.py
index ed7351d..593d74b 100644
--- a/python/pyspark/context.py
+++ b/python/pyspark/context.py
@@ -407,7 +407,7 @@ class SparkContext(object):
 
     def binaryFiles(self, path, minPartitions=None):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Read a directory of binary files from HDFS, a local file system
         (available on all nodes), or any Hadoop-supported file system URI
@@ -424,7 +424,7 @@ class SparkContext(object):
 
     def binaryRecords(self, path, recordLength):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Load data from a flat binary file, assuming each record is a set of numbers
         with the specified numerical format (see ByteBuffer), and the number of

http://git-wip-us.apache.org/repos/asf/spark/blob/3cd51619/python/pyspark/mllib/classification.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/classification.py b/python/pyspark/mllib/classification.py
index f14d0ed..00e2e76 100644
--- a/python/pyspark/mllib/classification.py
+++ b/python/pyspark/mllib/classification.py
@@ -41,7 +41,7 @@ class LinearBinaryClassificationModel(LinearModel):
 
     def setThreshold(self, value):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Sets the threshold that separates positive predictions from negative
         predictions. An example with prediction score greater than or equal
@@ -51,7 +51,7 @@ class LinearBinaryClassificationModel(LinearModel):
 
     def clearThreshold(self):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         Clears the threshold so that `predict` will output raw prediction scores.
         """

http://git-wip-us.apache.org/repos/asf/spark/blob/3cd51619/python/pyspark/mllib/feature.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index e46af20..10df628 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -36,7 +36,7 @@ __all__ = ['Normalizer', 'StandardScalerModel', 'StandardScaler',
 
 class VectorTransformer(object):
     """
-    :: DeveloperApi ::
+    .. note:: DeveloperApi
 
     Base class for transformation of a vector or RDD of vector
     """
@@ -51,7 +51,7 @@ class VectorTransformer(object):
 
 class Normalizer(VectorTransformer):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Normalizes samples individually to unit L\ :sup:`p`\  norm
 
@@ -112,7 +112,7 @@ class JavaVectorTransformer(JavaModelWrapper, VectorTransformer):
 
 class StandardScalerModel(JavaVectorTransformer):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Represents a StandardScaler model that can transform vectors.
     """
@@ -129,7 +129,7 @@ class StandardScalerModel(JavaVectorTransformer):
 
 class StandardScaler(object):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Standardizes features by removing the mean and scaling to unit
     variance using column summary statistics on the samples in the
@@ -172,7 +172,7 @@ class StandardScaler(object):
 
 class HashingTF(object):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Maps a sequence of terms to their term frequencies using the hashing trick.
 
@@ -232,7 +232,7 @@ class IDFModel(JavaVectorTransformer):
 
 class IDF(object):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Inverse document frequency (IDF).
 

http://git-wip-us.apache.org/repos/asf/spark/blob/3cd51619/python/pyspark/mllib/stat.py
----------------------------------------------------------------------
diff --git a/python/pyspark/mllib/stat.py b/python/pyspark/mllib/stat.py
index 1980f5b..c8af777 100644
--- a/python/pyspark/mllib/stat.py
+++ b/python/pyspark/mllib/stat.py
@@ -55,7 +55,7 @@ class MultivariateStatisticalSummary(JavaModelWrapper):
 
 class ChiSqTestResult(JavaModelWrapper):
     """
-    :: Experimental ::
+    .. note:: Experimental
 
     Object containing the test results for the chi-squared hypothesis test.
     """
@@ -200,7 +200,7 @@ class Statistics(object):
     @staticmethod
     def chiSqTest(observed, expected=None):
         """
-        :: Experimental ::
+        .. note:: Experimental
 
         If `observed` is Vector, conduct Pearson's chi-squared goodness
         of fit test of the observed data against the expected distribution,

http://git-wip-us.apache.org/repos/asf/spark/blob/3cd51619/python/pyspark/rdd.py
----------------------------------------------------------------------
diff --git a/python/pyspark/rdd.py b/python/pyspark/rdd.py
index bd2ff00..c1120cf 100644
--- a/python/pyspark/rdd.py
+++ b/python/pyspark/rdd.py
@@ -1964,7 +1964,7 @@ class RDD(object):
 
     def countApprox(self, timeout, confidence=0.95):
         """
-        :: Experimental ::
+        .. note:: Experimental
         Approximate version of count() that returns a potentially incomplete
         result within a timeout, even if not all tasks have finished.
 
@@ -1977,7 +1977,7 @@ class RDD(object):
 
     def sumApprox(self, timeout, confidence=0.95):
         """
-        :: Experimental ::
+        .. note:: Experimental
         Approximate operation to return the sum within a timeout
         or meet the confidence.
 
@@ -1993,7 +1993,7 @@ class RDD(object):
 
     def meanApprox(self, timeout, confidence=0.95):
         """
-        :: Experimental ::
+        .. note:: Experimental
         Approximate operation to return the mean within a timeout
         or meet the confidence.
 
@@ -2009,7 +2009,7 @@ class RDD(object):
 
     def countApproxDistinct(self, relativeSD=0.05):
         """
-        :: Experimental ::
+        .. note:: Experimental
         Return approximate number of distinct elements in the RDD.
 
         The algorithm used is based on streamlib's implementation of

http://git-wip-us.apache.org/repos/asf/spark/blob/3cd51619/python/pyspark/sql.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index 1ee0b28..469f824 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -420,7 +420,7 @@ class StructType(DataType):
 
 class UserDefinedType(DataType):
     """
-    :: WARN: Spark Internal Use Only ::
+    .. note:: WARN: Spark Internal Use Only
     SQL User-Defined Type (UDT).
     """
 

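For a quick check of how the new directive renders, a docstring fragment can
be fed to docutils directly (a minimal sketch, assuming docutils is
installed; the actual Spark docs build described in docs/README.md uses
Sphinx, which handles the same reST directive):

    from docutils.core import publish_string

    # A docstring fragment using the new annotation style from this commit.
    fragment = (
        ".. note:: Experimental\n"
        "\n"
        "Clears the threshold so that `predict` will output raw\n"
        "prediction scores.\n"
    )

    # Render the fragment to HTML; the note appears as an admonition block,
    # whereas the old ":: Experimental ::" marker was rendered as plain text.
    html = publish_string(fragment, writer_name="html")
    print(html.decode("utf-8"))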
