You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by ma...@apache.org on 2015/05/22 18:44:48 UTC
spark git commit: [SPARK-6743] [SQL] Fix empty projections of cached data
Repository: spark
Updated Branches:
refs/heads/master 4e5220c31 -> 3b68cb043
[SPARK-6743] [SQL] Fix empty projections of cached data
Author: Michael Armbrust <mi...@databricks.com>
Closes #6165 from marmbrus/wrongColumn and squashes the following commits:
4fad158 [Michael Armbrust] Merge remote-tracking branch 'origin/master' into wrongColumn
aad7eab [Michael Armbrust] rxins comments
f1e8df1 [Michael Armbrust] [SPARK-6743][SQL] Fix empty projections of cached data
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/3b68cb04
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/3b68cb04
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/3b68cb04
Branch: refs/heads/master
Commit: 3b68cb0430067059e9c7b9a86dbea4865e29bf78
Parents: 4e5220c
Author: Michael Armbrust <mi...@databricks.com>
Authored: Fri May 22 09:43:46 2015 -0700
Committer: Michael Armbrust <mi...@databricks.com>
Committed: Fri May 22 09:43:46 2015 -0700
----------------------------------------------------------------------
project/SparkBuild.scala | 1 +
.../src/main/scala/org/apache/spark/sql/Row.scala | 3 +++
.../sql/columnar/InMemoryColumnarTableScan.scala | 2 +-
.../scala/org/apache/spark/sql/SQLQuerySuite.scala | 17 +++++++++++++++--
4 files changed, 20 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/3b68cb04/project/SparkBuild.scala
----------------------------------------------------------------------
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 1b87e4e..b9515a1 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -324,6 +324,7 @@ object Hive {
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.hive._
|import org.apache.spark.sql.hive.test.TestHive._
+ |import org.apache.spark.sql.hive.test.TestHive.implicits._
|import org.apache.spark.sql.types._""".stripMargin,
cleanupCommands in console := "sparkContext.stop()",
// Some of our log4j jars make it impossible to submit jobs from this JVM to Hive Map/Reduce
http://git-wip-us.apache.org/repos/asf/spark/blob/3b68cb04/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
index 4190b7f..0d460b6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/Row.scala
@@ -55,6 +55,9 @@ object Row {
// TODO: Improve the performance of this if used in performance critical part.
new GenericRow(rows.flatMap(_.toSeq).toArray)
}
+
+ /** Returns an empty row. */
+ val empty = apply()
}
http://git-wip-us.apache.org/repos/asf/spark/blob/3b68cb04/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala b/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
index 0ded1cc..a59d42c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/columnar/InMemoryColumnarTableScan.scala
@@ -314,7 +314,7 @@ private[sql] case class InMemoryColumnarTableScan(
columnAccessors(i).extractTo(nextRow, i)
i += 1
}
- nextRow
+ if (attributes.isEmpty) Row.empty else nextRow
}
override def hasNext: Boolean = columnAccessors(0).hasNext
http://git-wip-us.apache.org/repos/asf/spark/blob/3b68cb04/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index c5c4f44..7c47fe4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -39,6 +39,19 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
import org.apache.spark.sql.test.TestSQLContext.implicits._
val sqlCtx = TestSQLContext
+ test("SPARK-6743: no columns from cache") {
+ Seq(
+ (83, 0, 38),
+ (26, 0, 79),
+ (43, 81, 24)
+ ).toDF("a", "b", "c").registerTempTable("cachedData")
+
+ cacheTable("cachedData")
+ checkAnswer(
+ sql("SELECT t1.b FROM cachedData, cachedData t1 GROUP BY t1.b"),
+ Row(0) :: Row(81) :: Nil)
+ }
+
test("self join with aliases") {
Seq(1,2,3).map(i => (i, i.toString)).toDF("int", "str").registerTempTable("df")
@@ -142,7 +155,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
sql("SELECT ABS(2.5)"),
Row(2.5))
}
-
+
test("aggregation with codegen") {
val originalValue = conf.codegenEnabled
setConf(SQLConf.CODEGEN_ENABLED, "true")
@@ -194,7 +207,7 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
"SELECT value, sum(key) FROM testData3x GROUP BY value",
(1 to 100).map(i => Row(i.toString, 3 * i)))
testCodeGen(
- "SELECT sum(key), SUM(CAST(key as Double)) FROM testData3x",
+ "SELECT sum(key), SUM(CAST(key as Double)) FROM testData3x",
Row(5050 * 3, 5050 * 3.0) :: Nil)
// AVERAGE
testCodeGen(
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org