Posted to commits@spark.apache.org by rx...@apache.org on 2016/02/02 20:29:23 UTC

spark git commit: [SPARK-13138][SQL] Add "logical" package prefix for ddl.scala

Repository: spark
Updated Branches:
  refs/heads/master b1835d727 -> 7f6e3ec79


[SPARK-13138][SQL] Add "logical" package prefix for ddl.scala

ddl.scala is defined in the execution package, and yet its references to "UnaryNode" and "Command" point at classes in the logical package. This was fairly confusing when I was trying to understand the ddl code.
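
The fix replaces a wildcard import with an import of the package itself, so every use site is qualified. A minimal, self-contained sketch of that style, using toy demo.* packages rather than Spark's (compiled as a single file):

    // Toy stand-in for org.apache.spark.sql.catalyst.plans.logical.
    package demo.logical {
      trait Command
      trait UnaryNode
    }

    // Toy stand-in for org.apache.spark.sql.execution.datasources.
    package demo.execution {
      // Import the package itself, not its members, to force qualified names.
      import demo.logical

      // "logical.Command" makes the origin explicit at the use site; a wildcard
      // "import demo.logical._" would hide which package Command came from.
      case class DescribeTable(isExtended: Boolean) extends logical.Command
    }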

Author: Reynold Xin <rx...@databricks.com>

Closes #11021 from rxin/SPARK-13138.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/7f6e3ec7
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/7f6e3ec7
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/7f6e3ec7

Branch: refs/heads/master
Commit: 7f6e3ec79b77400f558ceffa10b2af011962115f
Parents: b1835d7
Author: Reynold Xin <rx...@databricks.com>
Authored: Tue Feb 2 11:29:20 2016 -0800
Committer: Reynold Xin <rx...@databricks.com>
Committed: Tue Feb 2 11:29:20 2016 -0800

----------------------------------------------------------------------
 .../apache/spark/sql/execution/datasources/ddl.scala   | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/7f6e3ec7/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ddl.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ddl.scala
index 1554209..a141b58 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ddl.scala
@@ -20,7 +20,8 @@ package org.apache.spark.sql.execution.datasources
 import org.apache.spark.sql.{DataFrame, Row, SaveMode, SQLContext}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
-import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.catalyst.plans.logical
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.RunnableCommand
 import org.apache.spark.sql.types._
 
@@ -32,7 +33,7 @@ import org.apache.spark.sql.types._
  */
 case class DescribeCommand(
     table: LogicalPlan,
-    isExtended: Boolean) extends LogicalPlan with Command {
+    isExtended: Boolean) extends LogicalPlan with logical.Command {
 
   override def children: Seq[LogicalPlan] = Seq.empty
 
@@ -59,7 +60,7 @@ case class CreateTableUsing(
     temporary: Boolean,
     options: Map[String, String],
     allowExisting: Boolean,
-    managedIfNoPath: Boolean) extends LogicalPlan with Command {
+    managedIfNoPath: Boolean) extends LogicalPlan with logical.Command {
 
   override def output: Seq[Attribute] = Seq.empty
   override def children: Seq[LogicalPlan] = Seq.empty
@@ -67,8 +68,8 @@ case class CreateTableUsing(
 
 /**
  * A node used to support CTAS statements and saveAsTable for the data source API.
- * This node is a [[UnaryNode]] instead of a [[Command]] because we want the analyzer
- * can analyze the logical plan that will be used to populate the table.
+ * This node is a [[logical.UnaryNode]] instead of a [[logical.Command]] because we want the
+ * analyzer to analyze the logical plan that will be used to populate the table.
  * So, [[PreWriteCheck]] can detect cases that are not allowed.
  */
 case class CreateTableUsingAsSelect(
@@ -79,7 +80,7 @@ case class CreateTableUsingAsSelect(
     bucketSpec: Option[BucketSpec],
     mode: SaveMode,
     options: Map[String, String],
-    child: LogicalPlan) extends UnaryNode {
+    child: LogicalPlan) extends logical.UnaryNode {
   override def output: Seq[Attribute] = Seq.empty[Attribute]
 }
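
The [[logical.UnaryNode]] vs. [[logical.Command]] distinction in the comment above matters because tree-walking rules only visit a node's declared children. A toy sketch of that behavior, with hypothetical mini-plan classes that are not Spark's:

    object AnalyzerSketch {
      sealed trait Plan { def children: Seq[Plan] }

      case class Relation(name: String) extends Plan {
        def children: Seq[Plan] = Nil
      }

      // A command reports no children, so its inner plan is opaque to rules.
      case class OpaqueCommand(hidden: Plan) extends Plan {
        def children: Seq[Plan] = Nil
      }

      // A unary node exposes its child, so rules can recurse into it.
      case class SaveAs(table: String, child: Plan) extends Plan {
        def children: Seq[Plan] = Seq(child)
      }

      // A simplistic "analyzer" pass: visit the tree via children.
      def resolve(p: Plan): Unit = {
        p match {
          case Relation(n) => println(s"resolving $n")
          case _           => ()
        }
        p.children.foreach(resolve)
      }

      def main(args: Array[String]): Unit = {
        resolve(SaveAs("t", Relation("src")))   // prints "resolving src"
        resolve(OpaqueCommand(Relation("src"))) // prints nothing
      }
    }

A pass like this reaches the relation inside SaveAs but never the one inside OpaqueCommand; that is why CreateTableUsingAsSelect stays a UnaryNode, so the analyzer (and checks like PreWriteCheck) can see the query that populates the table.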
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org