Posted to commits@carbondata.apache.org by ch...@apache.org on 2017/07/21 10:48:52 UTC

carbondata git commit: [CARBONDATA-1209] Add partitionId to the SHOW PARTITIONS result

Repository: carbondata
Updated Branches:
  refs/heads/master c7aba5e5d -> c8355b5de


[CARBONDATA-1209] Add partitionId to the SHOW PARTITIONS result

This closes #1173
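
With this change, each SHOW PARTITIONS row pairs the partition id with the
partition description, and the output column is renamed to
"Partition(Id, DESC)". For the range-partitioned table exercised in the
tests below, the printed result looks roughly like:

  +---------------------------------+
  |Partition(Id, DESC)              |
  +---------------------------------+
  |0, doj = DEFAULT                 |
  |1, doj < 01-01-2010              |
  |2, 01-01-2010 <= doj < 01-01-2015|
  +---------------------------------+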


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/c8355b5d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/c8355b5d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/c8355b5d

Branch: refs/heads/master
Commit: c8355b5de95b2f1728e37930d14badcd8417e9ef
Parents: c7aba5e
Author: lionelcao <wh...@gmail.com>
Authored: Fri Jul 14 20:24:24 2017 +0800
Committer: chenliang613 <ch...@apache.org>
Committed: Fri Jul 21 18:48:37 2017 +0800

----------------------------------------------------------------------
 .../examples/CarbonPartitionExample.scala       | 19 +++++----
 .../examples/CarbonPartitionExample.scala       | 19 +++++----
 .../partition/TestShowPartitions.scala          | 20 ++++-----
 .../carbondata/spark/util/CommonUtil.scala      | 45 +++++++++++---------
 .../execution/command/carbonTableSchema.scala   |  8 ++--
 5 files changed, 58 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/c8355b5d/examples/spark/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala b/examples/spark/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
index 2f55189..9ceadea 100644
--- a/examples/spark/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
+++ b/examples/spark/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
@@ -21,6 +21,7 @@ import scala.collection.mutable.LinkedHashMap
 
 import org.apache.spark.sql.AnalysisException
 
+import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.examples.util.ExampleUtils
@@ -32,7 +33,7 @@ object CarbonPartitionExample {
     val testData = ExampleUtils.currentPath + "/src/main/resources/data.csv"
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
-
+    val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
     // none partition table
     cc.sql("DROP TABLE IF EXISTS t0")
     cc.sql("""
@@ -121,18 +122,18 @@ object CarbonPartitionExample {
     cc.sql("alter table hiveDB.t7 add partition (city = 'Shanghai')")
     //  show partitions
     try {
-      cc.sql("SHOW PARTITIONS t0").show()
+      cc.sql("SHOW PARTITIONS t0").show(100, false)
     } catch {
-      case ex: AnalysisException => print(ex.getMessage())
+      case ex: AnalysisException => LOGGER.error(ex.getMessage())
     }
-    cc.sql("SHOW PARTITIONS t1").show()
-    cc.sql("SHOW PARTITIONS t3").show()
-    cc.sql("SHOW PARTITIONS t5").show()
-    cc.sql("SHOW PARTITIONS t7").show()
+    cc.sql("SHOW PARTITIONS t1").show(100, false)
+    cc.sql("SHOW PARTITIONS t3").show(100, false)
+    cc.sql("SHOW PARTITIONS t5").show(100, false)
+    cc.sql("SHOW PARTITIONS t7").show(100, false)
     cc.sql("use hiveDB").show()
-    cc.sql("SHOW PARTITIONS t7").show()
+    cc.sql("SHOW PARTITIONS t7").show(100, false)
     cc.sql("use default").show()
-    cc.sql("SHOW PARTITIONS partitionDB.t9").show()
+    cc.sql("SHOW PARTITIONS partitionDB.t9").show(100, false)
 
     cc.sql("DROP TABLE IF EXISTS t0")
     cc.sql("DROP TABLE IF EXISTS t1")
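
Why show(100, false) rather than the default show(): DataFrame.show() prints
at most 20 rows and truncates cells longer than 20 characters, which would
clip the new "id, desc" partition strings. A minimal standalone sketch of the
difference (hypothetical table t1, plain SparkSession instead of the
example's CarbonContext):

  import org.apache.spark.sql.SparkSession

  object ShowPartitionsDisplaySketch {
    def main(args: Array[String]): Unit = {
      val spark = SparkSession.builder()
        .master("local[1]")
        .appName("ShowPartitionsDisplaySketch")
        .getOrCreate()

      val df = spark.sql("SHOW PARTITIONS t1")
      df.show()           // default: 20 rows max, cells truncated to 20 chars
      df.show(100, false) // up to 100 rows, full cell contents
      spark.stop()
    }
  }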

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c8355b5d/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
index 4cdde42..8a01ba1 100644
--- a/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
+++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonPartitionExample.scala
@@ -23,6 +23,7 @@ import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException
 import org.apache.spark.sql.SparkSession
 
+import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.util.CarbonProperties
 
@@ -38,7 +39,7 @@ object CarbonPartitionExample {
 
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
-
+    val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
     import org.apache.spark.sql.CarbonSession._
 
     val spark = SparkSession
@@ -123,7 +124,7 @@ object CarbonPartitionExample {
     try {
       spark.sql(s"DROP TABLE IF EXISTS partitionDB.t9")
     } catch {
-      case ex: NoSuchDatabaseException => print(ex.getMessage())
+      case ex: NoSuchDatabaseException => LOGGER.error(ex.getMessage())
     }
     spark.sql(s"DROP DATABASE IF EXISTS partitionDB")
     spark.sql(s"CREATE DATABASE partitionDB")
@@ -144,15 +145,15 @@ object CarbonPartitionExample {
 
     // show partitions
     try {
-      spark.sql("""SHOW PARTITIONS t0""").show()
+      spark.sql("""SHOW PARTITIONS t0""").show(100, false)
     } catch {
-      case ex: AnalysisException => print(ex.getMessage())
+      case ex: AnalysisException => LOGGER.error(ex.getMessage())
     }
-    spark.sql("""SHOW PARTITIONS t1""").show()
-    spark.sql("""SHOW PARTITIONS t3""").show()
-    spark.sql("""SHOW PARTITIONS t5""").show()
-    spark.sql("""SHOW PARTITIONS t7""").show()
-    spark.sql("""SHOW PARTITIONS partitionDB.t9""").show()
+    spark.sql("""SHOW PARTITIONS t1""").show(100, false)
+    spark.sql("""SHOW PARTITIONS t3""").show(100, false)
+    spark.sql("""SHOW PARTITIONS t5""").show(100, false)
+    spark.sql("""SHOW PARTITIONS t7""").show(100, false)
+    spark.sql("""SHOW PARTITIONS partitionDB.t9""").show(100, false)
 
     // drop table
     spark.sql("DROP TABLE IF EXISTS t0")
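
Both examples also swap print(ex.getMessage()) for a LogService obtained from
LogServiceFactory, so failures land in the configured log rather than on
stdout. A minimal sketch of the pattern (object and method names are
hypothetical; the SQL and exception type mirror the spark2 example):

  import org.apache.spark.sql.SparkSession
  import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException

  import org.apache.carbondata.common.logging.LogServiceFactory

  object LoggingPatternSketch {
    // one logger per class, named after it, so messages are attributable
    private val LOGGER =
      LogServiceFactory.getLogService(this.getClass.getCanonicalName)

    def dropIfPresent(spark: SparkSession): Unit = {
      try {
        spark.sql("DROP TABLE IF EXISTS partitionDB.t9")
      } catch {
        // log the failure instead of printing it
        case ex: NoSuchDatabaseException => LOGGER.error(ex.getMessage)
      }
    }
  }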

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c8355b5d/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestShowPartitions.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestShowPartitions.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestShowPartitions.scala
index f509e02..f455e9a 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestShowPartitions.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestShowPartitions.scala
@@ -146,31 +146,31 @@ class TestShowPartition  extends QueryTest with BeforeAndAfterAll {
 
   test("show partition table: hash table") {
     // EqualTo
-    checkAnswer(sql("show partitions hashTable"), Seq(Row("empno=HASH_NUMBER(3)")))
+    checkAnswer(sql("show partitions hashTable"), Seq(Row("empno = HASH_NUMBER(3)")))
 
   }
 
   test("show partition table: range partition") {
     // EqualTo
-    checkAnswer(sql("show partitions rangeTable"), Seq(Row("doj=default"),
-      Row("doj<01-01-2010"), Row("01-01-2010<=doj<01-01-2015")))
+    checkAnswer(sql("show partitions rangeTable"), Seq(Row("0, doj = DEFAULT"),
+      Row("1, doj < 01-01-2010"), Row("2, 01-01-2010 <= doj < 01-01-2015")))
   }
 
   test("show partition table: list partition") {
     // EqualTo
-    checkAnswer(sql("show partitions listTable"), Seq(Row("workgroupcategory=default"),
-      Row("workgroupcategory=0"), Row("workgroupcategory=1"), Row("workgroupcategory=2, 3")))
+    checkAnswer(sql("show partitions listTable"), Seq(Row("0, workgroupcategory = DEFAULT"),
+      Row("1, workgroupcategory = 0"), Row("2, workgroupcategory = 1"), Row("3, workgroupcategory = 2, 3")))
 
   }
   test("show partition table: not default db") {
     // EqualTo
-    checkAnswer(sql("show partitions partitionDB.hashTable"), Seq(Row("empno=HASH_NUMBER(3)")))
+    checkAnswer(sql("show partitions partitionDB.hashTable"), Seq(Row("empno = HASH_NUMBER(3)")))
     // EqualTo
-    checkAnswer(sql("show partitions partitionDB.rangeTable"), Seq(Row("doj=default"),
-      Row("doj<01-01-2010"), Row("01-01-2010<=doj<01-01-2015")))
+    checkAnswer(sql("show partitions partitionDB.rangeTable"), Seq(Row("0, doj = DEFAULT"),
+      Row("1, doj < 01-01-2010"), Row("2, 01-01-2010 <= doj < 01-01-2015")))
     // EqualTo
-    checkAnswer(sql("show partitions partitionDB.listTable"), Seq(Row("workgroupcategory=default"),
-      Row("workgroupcategory=0"), Row("workgroupcategory=1"), Row("workgroupcategory=2, 3")))
+    checkAnswer(sql("show partitions partitionDB.listTable"), Seq(Row("0, workgroupcategory = DEFAULT"),
+      Row("1, workgroupcategory = 0"), Row("2, workgroupcategory = 1"), Row("3, workgroupcategory = 2, 3")))
 
   }
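
The listTable expectations follow the same scheme: id 0 is reserved for the
DEFAULT partition, then one row per list group, with multi-value groups
joined by ", " (so "3, workgroupcategory = 2, 3" reads as id 3, values
{2, 3}). A simplified sketch of that formatting (the real code looks ids up
via partitionInfo.getPartitionId, which this stand-in replaces with
index + 1):

  object ListPartitionRowsSketch {
    def listRows(columnName: String, listInfo: Seq[Seq[Any]]): Seq[String] = {
      val default = s"0, $columnName = DEFAULT"
      val groups = listInfo.zipWithIndex.map { case (group, index) =>
        s"${index + 1}, $columnName = ${group.mkString(", ")}"
      }
      default +: groups
    }

    def main(args: Array[String]): Unit = {
      // prints the four rows asserted above
      listRows("workgroupcategory", Seq(Seq(0), Seq(1), Seq(2, 3)))
        .foreach(println)
    }
  }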
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c8355b5d/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
index 579347b..9c74a31 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
@@ -587,42 +587,45 @@ object CommonUtil {
     var result = Seq.newBuilder[Row]
     partitionType match {
       case PartitionType.RANGE =>
-        result.+=(RowFactory.create(columnName + "=default"))
-        var rangeInfo = partitionInfo.getRangeInfo
-        var size = rangeInfo.size() - 1
+        result.+=(RowFactory.create("0" + ", " + columnName + " = DEFAULT"))
+        val rangeInfo = partitionInfo.getRangeInfo
+        val size = rangeInfo.size() - 1
         for (index <- 0 to size) {
           if (index == 0) {
-            result.+=(RowFactory.create(columnName + "<" + rangeInfo.get(index)))
+            val id = partitionInfo.getPartitionId(index + 1).toString
+            val desc = columnName + " < " + rangeInfo.get(index)
+            result.+=(RowFactory.create(id + ", " + desc))
           } else {
-            result.+=(RowFactory.create(rangeInfo.get(index - 1) + "<=" +
-              columnName + "<" + rangeInfo.get(index)))
+            val id = partitionInfo.getPartitionId(index + 1).toString
+            val desc = rangeInfo.get(index - 1) + " <= " + columnName + " < " + rangeInfo.get(index)
+            result.+=(RowFactory.create(id + ", " + desc))
           }
         }
       case PartitionType.RANGE_INTERVAL =>
-        result.+=(RowFactory.create(columnName + "="))
+        result.+=(RowFactory.create(columnName + " = "))
       case PartitionType.LIST =>
-        result.+=(RowFactory.create(columnName + "=default"))
-        var listInfo = partitionInfo.getListInfo
+        result.+=(RowFactory.create("0" + ", " + columnName + " = DEFAULT"))
+        val listInfo = partitionInfo.getListInfo
         listInfo.asScala.foreach {
           f =>
-            result.+=(RowFactory.create(columnName + "=" +
-              f.toArray().mkString(", ")))
+            val id = partitionInfo.getPartitionId(listInfo.indexOf(f) + 1).toString
+            val desc = columnName + " = " + f.toArray().mkString(", ")
+            result.+=(RowFactory.create(id + ", " + desc))
         }
       case PartitionType.HASH =>
-        var hashNumber = partitionInfo.getNumPartitions
-        result.+=(RowFactory.create(columnName + "=HASH_NUMBER(" + hashNumber.toString() + ")"))
+        val hashNumber = partitionInfo.getNumPartitions
+        result.+=(RowFactory.create(columnName + " = HASH_NUMBER(" + hashNumber.toString() + ")"))
       case others =>
-        result.+=(RowFactory.create(columnName + "="))
+        result.+=(RowFactory.create(columnName + " = "))
     }
-    result.result()
+    val rows = result.result()
+    rows
   }
 
-  def partitionInfoOutput: Seq[Attribute] = {
-    Seq(
-      AttributeReference("partition", StringType, nullable = false,
-        new MetadataBuilder().putString("comment", "partitions info").build())()
-    )
-  }
+  def partitionInfoOutput: Seq[Attribute] = Seq(
+    AttributeReference("Partition(Id, DESC)", StringType, false,
+      new MetadataBuilder().putString("comment", "partition").build())()
+  )
 
   /**
    * Method to clear the memory for a task
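
The heart of the change is in getPartitionInfo above: every row now combines
a partition id with a spaced-out description, where id 0 is reserved for the
DEFAULT partition and data partitions are resolved via
partitionInfo.getPartitionId(index + 1). A simplified, self-contained sketch
of the RANGE branch (plain strings stand in for the Rows built with
RowFactory, and the id lookup is passed in as a function):

  object RangePartitionRowsSketch {
    def rangeRows(columnName: String,
                  rangeInfo: Seq[String],
                  partitionId: Int => Int): Seq[String] = {
      val default = s"0, $columnName = DEFAULT" // id 0: DEFAULT partition
      val bounds = rangeInfo.indices.map { index =>
        val id = partitionId(index + 1)
        val desc =
          if (index == 0) s"$columnName < ${rangeInfo(index)}"
          else s"${rangeInfo(index - 1)} <= $columnName < ${rangeInfo(index)}"
        s"$id, $desc"
      }
      default +: bounds
    }

    def main(args: Array[String]): Unit = {
      // prints the three rows asserted in TestShowPartitions above
      rangeRows("doj", Seq("01-01-2010", "01-01-2015"), identity)
        .foreach(println)
    }
  }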

http://git-wip-us.apache.org/repos/asf/carbondata/blob/c8355b5d/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 2e5812c..ee0b8a6 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -85,15 +85,15 @@ private[sql] case class ShowCarbonPartitionsCommand(
       .lookupRelation(tableIdentifier)(sparkSession).
       asInstanceOf[CarbonRelation]
     val carbonTable = relation.tableMeta.carbonTable
-    var tableName = carbonTable.getFactTableName
-    var partitionInfo = carbonTable.getPartitionInfo(
+    val tableName = carbonTable.getFactTableName
+    val partitionInfo = carbonTable.getPartitionInfo(
       carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier.getTableName)
     if (partitionInfo == null) {
       throw new AnalysisException(
         s"SHOW PARTITIONS is not allowed on a table that is not partitioned: $tableName")
     }
-    var partitionType = partitionInfo.getPartitionType
-    var columnName = partitionInfo.getColumnSchemaList.get(0).getColumnName
+    val partitionType = partitionInfo.getPartitionType
+    val columnName = partitionInfo.getColumnSchemaList.get(0).getColumnName
     LOGGER.info("partition column name:" + columnName)
     CommonUtil.getPartitionInfo(columnName, partitionType, partitionInfo)
   }
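
End to end, ShowCarbonPartitionsCommand resolves the relation, rejects tables
without partition metadata, and delegates row building to
CommonUtil.getPartitionInfo. A hypothetical usage sketch (assumes a
partitioned Carbon table named rangeTable already exists; a plain
SparkSession is shown where the spark2 example would build a CarbonSession):

  import org.apache.spark.sql.SparkSession

  object ShowPartitionsUsageSketch {
    def main(args: Array[String]): Unit = {
      val spark = SparkSession.builder().master("local[1]").getOrCreate()
      // each returned Row carries one "Partition(Id, DESC)" string column
      spark.sql("SHOW PARTITIONS rangeTable")
        .collect()
        .foreach(row => println(row.getString(0))) // e.g. "1, doj < 01-01-2010"
      spark.stop()
    }
  }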