Posted to commits@carbondata.apache.org by ra...@apache.org on 2017/11/27 06:00:39 UTC

carbondata git commit: [CARBONDATA-1803] Changing format of Show segments

Repository: carbondata
Updated Branches:
  refs/heads/master ed8236781 -> 373342d0e


[CARBONDATA-1803] Changing format of Show segments

Restores the SHOW SEGMENTS output format for backward compatibility:
(1) "File Format" is a newly added column that had been inserted in the middle of the output, so it is moved to the last position
(2) The "Segment Id" column is renamed back to "SegmentSequenceId"
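
The test updates below follow from this reordering. As a minimal sketch (the table name and the surrounding Spark/Carbon session are assumptions, not part of this commit), positional readers of the SHOW SEGMENTS result must swap indices 4 and 5:

    // Hypothetical usage; assumes a CarbonData-enabled SparkSession in
    // scope providing sql(), and a table named "my_table".
    // Column order after this commit:
    //   0 = SegmentSequenceId, 1 = Status, 2 = Load Start Time,
    //   3 = Load End Time,     4 = Merged To, 5 = File Format
    val segments = sql("SHOW SEGMENTS FOR TABLE my_table").collect()
    segments.foreach { row =>
      val segmentId  = row.getString(0) // header was "Segment Id" before
      val mergedTo   = row.getString(4) // was index 5 before this commit
      val fileFormat = row.getString(5) // was index 4; now the last column
    }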

This closes #1558


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/373342d0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/373342d0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/373342d0

Branch: refs/heads/master
Commit: 373342d0e7186b0aba8b224a374cb39a245d2917
Parents: ed82367
Author: dhatchayani <dh...@gmail.com>
Authored: Fri Nov 24 11:26:10 2017 +0530
Committer: Ravindra Pesala <ra...@gmail.com>
Committed: Sun Nov 26 22:27:34 2017 +0530

----------------------------------------------------------------------
 .../src/main/scala/org/apache/carbondata/api/CarbonStore.scala | 4 ++--
 .../apache/carbondata/spark/util/GlobalDictionaryUtil.scala    | 2 +-
 .../scala/org/apache/spark/sql/CarbonCatalystOperators.scala   | 6 +++---
 .../spark/testsuite/segmentreading/TestSegmentReading.scala    | 2 +-
 .../apache/spark/carbondata/TestStreamingTableOperation.scala  | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/373342d0/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
index 73325a6..44cbb50 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
@@ -92,8 +92,8 @@ object CarbonStore {
             load.getSegmentStatus.getMessage,
             startTime,
             endTime,
-            load.getFileFormat.toString,
-            mergedTo)
+            mergedTo,
+            load.getFileFormat.toString)
         }.toSeq
     } else {
       Seq.empty

http://git-wip-us.apache.org/repos/asf/carbondata/blob/373342d0/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
index d8038d5..f6170e8 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
@@ -374,7 +374,7 @@ object GlobalDictionaryUtil {
       classOf[CSVInputFormat],
       classOf[NullWritable],
       classOf[StringArrayWritable],
-      hadoopConf).setName("global dictionary").map[Row] { currentRow =>
+      jobConf).setName("global dictionary").map[Row] { currentRow =>
       row.setValues(currentRow._2.get())
     }
     sqlContext.createDataFrame(rdd, schema)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/373342d0/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
index ee285e7..e02df9a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
@@ -122,12 +122,12 @@ case class ShowLoadsCommand(
   extends Command {
 
   override def output: Seq[Attribute] = {
-    Seq(AttributeReference("Segment Id", StringType, nullable = false)(),
+    Seq(AttributeReference("SegmentSequenceId", StringType, nullable = false)(),
       AttributeReference("Status", StringType, nullable = false)(),
       AttributeReference("Load Start Time", TimestampType, nullable = false)(),
       AttributeReference("Load End Time", TimestampType, nullable = true)(),
-      AttributeReference("File Format", StringType, nullable = false)(),
-      AttributeReference("Merged To", StringType, nullable = false)())
+      AttributeReference("Merged To", StringType, nullable = false)(),
+      AttributeReference("File Format", StringType, nullable = false)())
   }
 }
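
Since these AttributeReferences define the schema of the SHOW SEGMENTS DataFrame, callers that select columns by name rather than position are unaffected by the reordering. A hedged sketch (hypothetical table name, assumes sql() from a CarbonData-enabled session):

    // Name-based access is stable across this column reordering.
    val df = sql("SHOW SEGMENTS FOR TABLE my_table")
    df.select("SegmentSequenceId", "Merged To", "File Format").show()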
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/373342d0/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala
index 9e12f43..19201e3 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala
@@ -248,7 +248,7 @@ class TestSegmentReading extends QueryTest with BeforeAndAfterAll {
             |('DELIMITER'= ',', 'QUOTECHAR'= '\"')""".stripMargin)
       val df = sql("SHOW SEGMENTS for table carbon_table_show_seg")
       val col = df.collect().map{
-        row => Row(row.getString(0),row.getString(1),row.getString(5))
+        row => Row(row.getString(0),row.getString(1),row.getString(4))
       }.toSeq
       assert(col.equals(Seq(Row("2","Success","NA"),
         Row("1","Compacted","0.1"),

http://git-wip-us.apache.org/repos/asf/carbondata/blob/373342d0/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala
index 7cbec04..d9591c4 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala
@@ -570,7 +570,7 @@ class TestStreamingTableOperation extends QueryTest with BeforeAndAfterAll {
     result.foreach { row =>
       if (row.getString(0).equals("1")) {
         assertResult(SegmentStatus.STREAMING.getMessage)(row.getString(1))
-        assertResult(FileFormat.ROW_V1.toString)(row.getString(4))
+        assertResult(FileFormat.ROW_V1.toString)(row.getString(5))
       }
     }
   }