Posted to commits@spark.apache.org by ma...@apache.org on 2014/09/05 00:06:16 UTC

git commit: [SPARK-3378] [DOCS] Replace the word "SparkSQL" with the right word "Spark SQL"

Repository: spark
Updated Branches:
  refs/heads/master 4feb46c5f -> dc1ba9e9f


[SPARK-3378] [DOCS] Replace the word "SparkSQL" with the right word "Spark SQL"

Author: Kousuke Saruta <sa...@oss.nttdata.co.jp>

Closes #2251 from sarutak/SPARK-3378 and squashes the following commits:

0bfe234 [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into SPARK-3378
bb5938f [Kousuke Saruta] Replaced rest of "SparkSQL" with "Spark SQL"
6df66de [Kousuke Saruta] Replaced "SparkSQL" with "Spark SQL"


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/dc1ba9e9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/dc1ba9e9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/dc1ba9e9

Branch: refs/heads/master
Commit: dc1ba9e9fc169962a9282ea6644dce09281ff598
Parents: 4feb46c
Author: Kousuke Saruta <sa...@oss.nttdata.co.jp>
Authored: Thu Sep 4 15:06:08 2014 -0700
Committer: Michael Armbrust <mi...@databricks.com>
Committed: Thu Sep 4 15:06:08 2014 -0700

----------------------------------------------------------------------
 dev/run-tests                                                  | 2 +-
 docs/programming-guide.md                                      | 2 +-
 python/pyspark/sql.py                                          | 6 +++---
 python/run-tests                                               | 2 +-
 .../src/main/scala/org/apache/spark/sql/api/java/Row.scala     | 2 +-
 .../org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala   | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/dc1ba9e9/dev/run-tests
----------------------------------------------------------------------
diff --git a/dev/run-tests b/dev/run-tests
index d751961..90a8ce1 100755
--- a/dev/run-tests
+++ b/dev/run-tests
@@ -89,7 +89,7 @@ echo "========================================================================="
 echo "Running Spark unit tests"
 echo "========================================================================="
 
-# Build Spark; we always build with Hive because the PySpark SparkSQL tests need it.
+# Build Spark; we always build with Hive because the PySpark Spark SQL tests need it.
 # echo "q" is needed because sbt on encountering a build file with failure
 # (either resolution or compilation) prompts the user for input either q, r,
 # etc to quit or retry. This echo is there to make it not block.
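
For context, the trick the comment above describes can be reproduced outside the script. A minimal Python sketch, assuming sbt is on the PATH and using placeholder build arguments, of pre-feeding "q" on stdin so a broken build file makes sbt quit instead of blocking on its prompt:

    import subprocess

    # Same idea as `echo "q" | sbt ...` in dev/run-tests: if the build file fails
    # to load, sbt prompts for q/r/etc., so feeding "q" on stdin keeps a batch run
    # from hanging. The sbt arguments here are placeholders, not the real ones.
    result = subprocess.run(["sbt", "assembly"], input="q\n", text=True)
    print("sbt exited with code", result.returncode)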

http://git-wip-us.apache.org/repos/asf/spark/blob/dc1ba9e9/docs/programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/programming-guide.md b/docs/programming-guide.md
index 6ae780d..624cc74 100644
--- a/docs/programming-guide.md
+++ b/docs/programming-guide.md
@@ -385,7 +385,7 @@ Apart from text files, Spark's Python API also supports several other data forma
 
 * SequenceFile and Hadoop Input/Output Formats
 
-**Note** this feature is currently marked ```Experimental``` and is intended for advanced users. It may be replaced in future with read/write support based on SparkSQL, in which case SparkSQL is the preferred approach.
+**Note** this feature is currently marked ```Experimental``` and is intended for advanced users. It may be replaced in future with read/write support based on Spark SQL, in which case Spark SQL is the preferred approach.
 
 **Writable Support**
 

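For context on the experimental feature the note above mentions, a minimal PySpark sketch of reading a SequenceFile of Writable key/value pairs; the path and data are hypothetical:

    from pyspark import SparkContext

    sc = SparkContext("local", "sequencefile-example")
    # sc.sequenceFile loads Writable keys/values and converts them to Python
    # objects; the HDFS path below is purely illustrative.
    rdd = sc.sequenceFile("hdfs:///tmp/example-seqfile")
    print(rdd.take(5))
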
http://git-wip-us.apache.org/repos/asf/spark/blob/dc1ba9e9/python/pyspark/sql.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index aaa35da..e7f573c 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -900,7 +900,7 @@ def _create_cls(dataType):
 
 class SQLContext:
 
-    """Main entry point for SparkSQL functionality.
+    """Main entry point for Spark SQL functionality.
 
     A SQLContext can be used create L{SchemaRDD}s, register L{SchemaRDD}s as
     tables, execute SQL over tables, cache tables, and read parquet files.
@@ -946,7 +946,7 @@ class SQLContext:
 
     @property
     def _ssql_ctx(self):
-        """Accessor for the JVM SparkSQL context.
+        """Accessor for the JVM Spark SQL context.
 
         Subclasses can override this property to provide their own
         JVM Contexts.
@@ -1507,7 +1507,7 @@ class SchemaRDD(RDD):
     """An RDD of L{Row} objects that has an associated schema.
 
     The underlying JVM object is a SchemaRDD, not a PythonRDD, so we can
-    utilize the relational query api exposed by SparkSQL.
+    utilize the relational query api exposed by Spark SQL.
 
     For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the
     L{SchemaRDD} is not operated on directly, as it's underlying
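
For context, a minimal PySpark sketch of the entry point these docstrings describe; it is not part of the patch, the local master, application name, and sample data are purely illustrative, and the method names follow the 1.x Python API:

    from pyspark import SparkContext
    from pyspark.sql import SQLContext, Row

    sc = SparkContext("local", "spark-sql-example")
    sqlContext = SQLContext(sc)

    # Turn an ordinary RDD of Row objects into a SchemaRDD, register it as a
    # table, and run SQL over it.
    people = sc.parallelize([Row(name="Alice", age=25), Row(name="Bob", age=30)])
    schemaPeople = sqlContext.inferSchema(people)
    schemaPeople.registerTempTable("people")

    adults = sqlContext.sql("SELECT name FROM people WHERE age >= 21")
    print(adults.collect())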

http://git-wip-us.apache.org/repos/asf/spark/blob/dc1ba9e9/python/run-tests
----------------------------------------------------------------------
diff --git a/python/run-tests b/python/run-tests
index d671da4..f2a80b4 100755
--- a/python/run-tests
+++ b/python/run-tests
@@ -28,7 +28,7 @@ FAILED=0
 
 rm -f unit-tests.log
 
-# Remove the metastore and warehouse directory created by the HiveContext tests in SparkSQL
+# Remove the metastore and warehouse directory created by the HiveContext tests in Spark SQL
 rm -rf metastore warehouse
 
 function run_test() {
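
For context, the cleanup step this script performs (`rm -rf metastore warehouse`) can be expressed as a small Python sketch; the directory names are the ones the comment above refers to:

    import os
    import shutil

    # HiveContext test runs leave a metastore and a warehouse directory in the
    # working directory; remove them before the next run.
    for leftover in ("metastore", "warehouse"):
        if os.path.isdir(leftover):
            shutil.rmtree(leftover)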

http://git-wip-us.apache.org/repos/asf/spark/blob/dc1ba9e9/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala
index 6c67934..e9d04ce 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala
@@ -25,7 +25,7 @@ import scala.math.BigDecimal
 import org.apache.spark.sql.catalyst.expressions.{Row => ScalaRow}
 
 /**
- * A result row from a SparkSQL query.
+ * A result row from a Spark SQL query.
  */
 class Row(private[spark] val row: ScalaRow) extends Serializable {
 

http://git-wip-us.apache.org/repos/asf/spark/blob/dc1ba9e9/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
index 544abfc..abed299 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector
 import org.apache.hadoop.io.Writable
 
 /**
- * A placeholder that allows SparkSQL users to create metastore tables that are stored as
+ * A placeholder that allows Spark SQL users to create metastore tables that are stored as
  * parquet files.  It is only intended to pass the checks that the serde is valid and exists
  * when a CREATE TABLE is run.  The actual work of decoding will be done by ParquetTableScan
  * when "spark.sql.hive.convertMetastoreParquet" is set to true.
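
For context, a hedged PySpark sketch of the setting this scaladoc refers to; the table name is hypothetical and the example assumes a Hive-enabled build:

    from pyspark import SparkContext
    from pyspark.sql import HiveContext

    sc = SparkContext("local", "parquet-metastore-example")
    sqlContext = HiveContext(sc)

    # With this flag on, Spark SQL's own Parquet scan (ParquetTableScan) reads
    # Parquet-backed metastore tables instead of going through the Hive SerDe.
    sqlContext.sql("SET spark.sql.hive.convertMetastoreParquet=true")
    rows = sqlContext.sql("SELECT * FROM my_parquet_table LIMIT 10").collect()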

