Posted to commits@spark.apache.org by sr...@apache.org on 2016/08/30 10:19:49 UTC

spark git commit: [SPARK-17264][SQL] DataStreamWriter should document that it only supports Parquet for now

Repository: spark
Updated Branches:
  refs/heads/master 2d76cb11f -> befab9c1c


[SPARK-17264][SQL] DataStreamWriter should document that it only supports Parquet for now

## What changes were proposed in this pull request?

Clarify that `parquet` is the only built-in file format supported by `DataStreamWriter` for now.
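
For context, a minimal sketch of the API the updated docs describe, writing a streaming Dataset out as Parquet. The input schema, source format, and all paths below are placeholders, not part of this patch:

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{LongType, StringType, StructType}

val spark = SparkSession.builder().appName("ParquetSinkSketch").getOrCreate()

// A file-based streaming source; schema and input path are assumptions for illustration.
val inputSchema = new StructType()
  .add("id", LongType)
  .add("value", StringType)

val events = spark.readStream
  .format("json")
  .schema(inputSchema)
  .load("/path/to/input")

// As the updated docs note, "parquet" is the only built-in file sink format for now.
val query = events.writeStream
  .format("parquet")
  .option("path", "/path/to/output")
  .option("checkpointLocation", "/path/to/checkpoint")
  .start()

query.awaitTermination()
```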

## How was this patch tested?

(Doc build -- no functional changes to test)

Author: Sean Owen <so...@cloudera.com>

Closes #14860 from srowen/SPARK-17264.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/befab9c1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/befab9c1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/befab9c1

Branch: refs/heads/master
Commit: befab9c1c6b59ad90f63a7d10e12b186be897f15
Parents: 2d76cb1
Author: Sean Owen <so...@cloudera.com>
Authored: Tue Aug 30 11:19:45 2016 +0100
Committer: Sean Owen <so...@cloudera.com>
Committed: Tue Aug 30 11:19:45 2016 +0100

----------------------------------------------------------------------
 python/pyspark/sql/streaming.py                                    | 2 +-
 .../scala/org/apache/spark/sql/streaming/DataStreamWriter.scala    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/befab9c1/python/pyspark/sql/streaming.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/streaming.py b/python/pyspark/sql/streaming.py
index a0ba582..67375f6 100644
--- a/python/pyspark/sql/streaming.py
+++ b/python/pyspark/sql/streaming.py
@@ -591,7 +591,7 @@ class DataStreamWriter(object):
 
         .. note:: Experimental.
 
-        :param source: string, name of the data source, e.g. 'json', 'parquet'.
+        :param source: string, name of the data source, which for now can be 'parquet'.
 
         >>> writer = sdf.writeStream.format('json')
         """

http://git-wip-us.apache.org/repos/asf/spark/blob/befab9c1/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
index d38e3e5..f70c7d0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
@@ -122,7 +122,7 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
 
   /**
    * :: Experimental ::
-   * Specifies the underlying output data source. Built-in options include "parquet", "json", etc.
+   * Specifies the underlying output data source. Built-in options include "parquet" for now.
    *
    * @since 2.0.0
    */


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org