Posted to commits@carbondata.apache.org by ma...@apache.org on 2017/12/28 09:16:52 UTC

carbondata git commit: [CARBONDATA-1939] Added show segments validation test case

Repository: carbondata
Updated Branches:
  refs/heads/master 7a36c1a8d -> 1937a21a2


[CARBONDATA-1939] Added show segments validation test case

1. Modified the output headers of SHOW SEGMENTS

2. Modified the SDV test cases to validate both the headers and the result rows

This closes #1723
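
For context, the new assertions check both the column headers and the row contents returned by SHOW SEGMENTS. A minimal standalone sketch of the same pattern, assuming a QueryTest-style sql() helper and an already-loaded single-segment table named t (both hypothetical, not part of this commit):

  // SHOW SEGMENTS yields one row per load; collect() returns Array[Row].
  val rows = sql("SHOW SEGMENTS FOR TABLE t").collect()
  // Header names travel with the schema of the collected rows.
  val header = rows(0).schema
  assert(header(0).name.equalsIgnoreCase("SegmentSequenceId"))
  assert(header(1).name.equalsIgnoreCase("Status"))
  assert(header(2).name.equalsIgnoreCase("Load Start Time"))
  assert(header(3).name.equalsIgnoreCase("Load End Time"))
  assert(header(4).name.equalsIgnoreCase("Merged To"))
  assert(header(5).name.equalsIgnoreCase("File Format"))
  // Columns 0, 1 and 4 hold SegmentSequenceId, Status and Merged To.
  val actual = rows.map(r => (r.getString(0), r.getString(1), r.getString(4))).toSeq
  assert(actual == Seq(("0", "Success", "NA")))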


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1937a21a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1937a21a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1937a21a

Branch: refs/heads/master
Commit: 1937a21a210c16c4a185a803515b6932d0bc0553
Parents: 7a36c1a
Author: dhatchayani <dh...@gmail.com>
Authored: Tue Dec 26 11:40:14 2017 +0530
Committer: manishgupta88 <to...@gmail.com>
Committed: Thu Dec 28 14:49:51 2017 +0530

----------------------------------------------------------------------
 .../sdv/generated/ShowLoadsTestCase.scala       | 29 +++++++++++++++++---
 .../management/CarbonShowLoadsCommand.scala     |  5 ++--
 2 files changed, 28 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1937a21a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ShowLoadsTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ShowLoadsTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ShowLoadsTestCase.scala
index 08be0b5..ec3b6fc 100644
--- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ShowLoadsTestCase.scala
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ShowLoadsTestCase.scala
@@ -36,7 +36,21 @@ class ShowLoadsTestCase extends QueryTest with BeforeAndAfterAll {
   sql(s"""CREATE TABLE ShowSegment_196 (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),Double_COLUMN1 double,DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('table_blocksize'='1')""").collect
   sql(s"""LOAD DATA INPATH '$resourcesPath/Data/InsertData/join1.csv' into table ShowSegment_196 OPTIONS('DELIMITER'=',' , 'QUOTECHAR'='"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,Double_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN2,INTEGER_COLUMN1')""").collect
   sql(s"""LOAD DATA INPATH '$resourcesPath/Data/InsertData/join1.csv' into table ShowSegment_196 OPTIONS('DELIMITER'=',' , 'QUOTECHAR'='"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,Double_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN2,INTEGER_COLUMN1')""").collect
-  sql(s"""show segments for table ShowSegment_196""").collect()
+
+  val df = sql(s"""show segments for table ShowSegment_196""").collect()
+  // validating headers
+  val header = df(0).schema
+   assert(header(0).name.equalsIgnoreCase("SegmentSequenceId"))
+   assert(header(1).name.equalsIgnoreCase("Status"))
+   assert(header(2).name.equalsIgnoreCase("Load Start Time"))
+   assert(header(3).name.equalsIgnoreCase("Load End Time"))
+   assert(header(4).name.equalsIgnoreCase("Merged To"))
+   assert(header(5).name.equalsIgnoreCase("File Format"))
+   val col = df.map {
+     row => Row(row.getString(0), row.getString(1), row.getString(4))
+   }.toSeq
+   assert(col.equals(Seq(Row("1", "Success", "NA"),
+     Row("0", "Success", "NA"))))
     sql(s"""drop table ShowSegment_196""").collect
  }
 
@@ -46,7 +60,11 @@ class ShowLoadsTestCase extends QueryTest with BeforeAndAfterAll {
     sql(s"""drop TABLE if exists Database_ShowSegment_196""").collect
   sql(s"""CREATE TABLE Database_ShowSegment_196 (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),Double_COLUMN1 double,DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('table_blocksize'='1')""").collect
   sql(s"""LOAD DATA INPATH '$resourcesPath/Data/InsertData/join1.csv' into table Database_ShowSegment_196 OPTIONS('DELIMITER'=',' , 'QUOTECHAR'='"','BAD_RECORDS_ACTION'='FORCE','FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,Double_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN2,INTEGER_COLUMN1')""").collect
-  sql(s"""show segments for table default.Database_ShowSegment_196""").collect()
+  val df = sql(s"""show segments for table default.Database_ShowSegment_196""").collect()
+   val col = df.map {
+     row => Row(row.getString(0), row.getString(1), row.getString(4))
+   }.toSeq
+   assert(col.equals(Seq(Row("0", "Success", "NA"))))
     sql(s"""drop table Database_ShowSegment_196""").collect
  }
 
@@ -55,8 +73,11 @@ class ShowLoadsTestCase extends QueryTest with BeforeAndAfterAll {
  test("DataLoadManagement001_830", Include) {
     sql(s"""drop TABLE if exists Case_ShowSegment_196""").collect
   sql(s"""CREATE TABLE Case_ShowSegment_196 (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),Double_COLUMN1 double,DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('table_blocksize'='1')""").collect
-   sql(s"""show segments for table CASE_ShowSegment_196""").collect
-
+   val df = sql(s"""show segments for table CASE_ShowSegment_196""").collect()
+   val col = df.map {
+     row => Row(row.getString(0), row.getString(1), row.getString(4))
+   }.toSeq
+   assert(col.equals(Seq()))
     sql(s"""drop table Case_ShowSegment_196""").collect
  }
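The tests above compare rows positionally after mapping them to Row objects. A terser alternative sketch, assuming the checkAnswer helper from Spark's QueryTest base class is in scope for these SDV suites (an assumption, not shown in this diff):

  import org.apache.spark.sql.Row
  // checkAnswer compares a DataFrame against expected rows, ignoring row
  // order, and reports a readable diff on mismatch. Backticks quote the
  // column name containing a space.
  checkAnswer(
    sql("SHOW SEGMENTS FOR TABLE ShowSegment_196")
      .selectExpr("SegmentSequenceId", "Status", "`Merged To`"),
    Seq(Row("0", "Success", "NA"), Row("1", "Success", "NA")))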
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1937a21a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
index e0311cb..f8f215f 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowLoadsCommand.scala
@@ -30,11 +30,12 @@ case class CarbonShowLoadsCommand(
     limit: Option[String])
   extends DataCommand {
 
+  // New columns in the SHOW SEGMENTS output must be added at the end
   override def output: Seq[Attribute] = {
     Seq(AttributeReference("SegmentSequenceId", StringType, nullable = false)(),
       AttributeReference("Status", StringType, nullable = false)(),
-      AttributeReference("Load Start Time (GMT+0)", TimestampType, nullable = false)(),
-      AttributeReference("Load End Time (GMT+0)", TimestampType, nullable = true)(),
+      AttributeReference("Load Start Time", TimestampType, nullable = false)(),
+      AttributeReference("Load End Time", TimestampType, nullable = true)(),
       AttributeReference("Merged To", StringType, nullable = false)(),
       AttributeReference("File Format", StringType, nullable = false)())
   }
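
The comment introduced above records a compatibility rule: new SHOW SEGMENTS columns are appended at the end so that positional consumers keep working. A minimal sketch of such a consumer, assuming a SparkSession named spark and a table t (hypothetical names):

  // Indices 0-3 stay stable even if columns are appended later.
  val rows = spark.sql("SHOW SEGMENTS FOR TABLE t").collect()
  rows.foreach { r =>
    val segmentId = r.getString(0)     // SegmentSequenceId
    val status    = r.getString(1)     // Status
    val startTime = r.getTimestamp(2)  // Load Start Time
    println(s"segment $segmentId: $status, started $startTime")
  }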