You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by do...@apache.org on 2019/04/03 16:11:44 UTC
[spark] branch branch-2.4 updated: [MINOR][DOC][SQL] Remove
out-of-date doc about ORC in DataFrameReader and Writer
This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-2.4 by this push:
new ed3ffda [MINOR][DOC][SQL] Remove out-of-date doc about ORC in DataFrameReader and Writer
ed3ffda is described below
commit ed3ffdae1e12b5c212105b61a3b10a31d9914bef
Author: Liang-Chi Hsieh <vi...@gmail.com>
AuthorDate: Wed Apr 3 09:11:09 2019 -0700
[MINOR][DOC][SQL] Remove out-of-date doc about ORC in DataFrameReader and Writer
## What changes were proposed in this pull request?
According to the current status, `orc` is available even when Hive support isn't enabled. This is a minor doc change to reflect it.
## How was this patch tested?
Doc only change.
Closes #24280 from viirya/fix-orc-doc.
Authored-by: Liang-Chi Hsieh <vi...@gmail.com>
Signed-off-by: Dongjoon Hyun <dh...@apple.com>
(cherry picked from commit d04a7371daec4af046a35066f9664c5011162baa)
Signed-off-by: Dongjoon Hyun <dh...@apple.com>
---
python/pyspark/sql/readwriter.py | 4 ----
sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | 2 --
sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala | 1 -
3 files changed, 7 deletions(-)
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index 690b130..c25426c 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -496,8 +496,6 @@ class DataFrameReader(OptionUtils):
def orc(self, path):
"""Loads ORC files, returning the result as a :class:`DataFrame`.
- .. note:: Currently ORC support is only available together with Hive support.
-
>>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> df.dtypes
[('a', 'bigint'), ('b', 'int'), ('c', 'int')]
@@ -932,8 +930,6 @@ class DataFrameWriter(OptionUtils):
def orc(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
- .. note:: Currently ORC support is only available together with Hive support.
-
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index e9278a0..666a97d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -646,7 +646,6 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
*
* @param path input path
* @since 1.5.0
- * @note Currently, this method can only be used after enabling Hive support.
*/
def orc(path: String): DataFrame = {
// This method ensures that calls that explicit need single argument works, see SPARK-16009
@@ -658,7 +657,6 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
*
* @param paths input paths
* @since 2.0.0
- * @note Currently, this method can only be used after enabling Hive support.
*/
@scala.annotation.varargs
def orc(paths: String*): DataFrame = format("orc").load(paths: _*)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index b77dfd9..a2586cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -574,7 +574,6 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
* </ul>
*
* @since 1.5.0
- * @note Currently, this method can only be used after enabling Hive support
*/
def orc(path: String): Unit = {
format("orc").save(path)
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org