Posted to dev@carbondata.apache.org by GitBox <gi...@apache.org> on 2021/04/21 14:43:16 UTC

[GitHub] [carbondata] Indhumathi27 commented on a change in pull request #4121: [CARBONDATA-4170] Support dropping of parent complex columns(array/struct)

Indhumathi27 commented on a change in pull request #4121:
URL: https://github.com/apache/carbondata/pull/4121#discussion_r617595919



##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
##########
@@ -147,19 +143,28 @@ private[sql] case class CarbonAlterTableDropColumnCommand(
         metastore.getThriftTableInfo(carbonTable)
       // maintain the deleted columns for schema evolution history
       var deletedColumnSchema = ListBuffer[org.apache.carbondata.format.ColumnSchema]()
+      var deletedParentColumns = ListBuffer[org.apache.carbondata.format.ColumnSchema]()
       val columnSchemaList = tableInfo.fact_table.table_columns.asScala
       alterTableDropColumnModel.columns.foreach { column =>
         columnSchemaList.foreach { columnSchema =>
-          if (!columnSchema.invisible && column.equalsIgnoreCase(columnSchema.column_name)) {
-            deletedColumnSchema += columnSchema.deepCopy
-            columnSchema.invisible = true
+          if (!columnSchema.invisible) {
+            if (column.equalsIgnoreCase(columnSchema.column_name)) {
+              deletedParentColumns += columnSchema.deepCopy

Review comment:
       please reuse the column schema copy object instead of creating two deep copies
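       For example, something like this (a rough sketch, not tested; `copiedSchema` is just an illustrative name):

       ```scala
       // copy once and add the same copy to both evolution lists
       val copiedSchema = columnSchema.deepCopy()
       deletedParentColumns += copiedSchema
       deletedColumnSchema += copiedSchema
       columnSchema.invisible = true
       ```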

##########
File path: integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/DropColumnTestCases.scala
##########
@@ -97,13 +102,127 @@ class DropColumnTestCases extends QueryTest with BeforeAndAfterAll {
     test_drop_and_compaction()
   }
 
-  test("test dropping of complex column should throw exception") {
-    sql("drop table if exists maintbl")
-    sql("create table maintbl (a string, b string, c struct<si:int>) STORED AS carbondata")
-    assert(intercept[ProcessMetaDataException] {
-      sql("alter table maintbl drop columns(b,c)").collect()
-    }.getMessage.contains("Complex column cannot be dropped"))
-    sql("drop table if exists maintbl")
+  def checkSchemaSize(): Unit = {
+    val schema = sql("describe alter_com").collect()
+    assert(schema.size == 1)
+  }
+
+  def droppedColumnsInSchemaEvolutionEntry(tableName: String): Seq[ColumnSchema] = {
+    val carbonTable = CarbonEnv.getCarbonTable(None, tableName)(sqlContext.sparkSession)
+    val schemaEvolutionList = carbonTable.getTableInfo
+      .getFactTable
+      .getSchemaEvolution()
+      .getSchemaEvolutionEntryList()
+    var droppedColumns = Seq[ColumnSchema]()
+    for (i <- 0 until schemaEvolutionList.size()) {
+      droppedColumns ++=
+      JavaConverters
+        .asScalaIteratorConverter(schemaEvolutionList.get(i).getRemoved.iterator())
+        .asScala
+        .toSeq
+    }
+    droppedColumns
+  }
+
+  test("test dropping of array of all primitive types") {
+    import scala.collection.mutable.WrappedArray.make
+    sql("DROP TABLE IF EXISTS alter_com")
+    sql("CREATE TABLE alter_com(intfield int, arr1 array<short>, arr2 array<int>, arr3 " +
+        "array<long>, arr4 array<double>, arr5 array<decimal(8,2)>, arr6 array<string>, arr7 " +
+        "array<char(5)>, arr8 array<varchar(50)>, arr9 array<boolean>, arr10 " +
+        "array<date>, arr11 array<timestamp>) STORED AS carbondata")
+    sql(
+      "insert into alter_com values(1,array(1,5),array(1,2),array(1,2,3),array(1.2d,2.3d),array" +
+      "(4.5,6.7),array('hello','world'),array('a','bcd'),array('abcd','efg'),array(true,false)," +
+      "array('2017-02-01','2018-09-11'),array('2017-02-01 00:01:00','2018-02-01 02:21:00') )")
+    sql("ALTER TABLE alter_com DROP COLUMNS(arr1,arr2,arr3,arr4,arr5,arr6) ")
+    sql("ALTER TABLE alter_com DROP COLUMNS(arr7,arr8,arr9) ")
+    sql("ALTER TABLE alter_com DROP COLUMNS(arr10,arr11) ")

Review comment:
       please add a check for dropping a duplicate complex column
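       A rough sketch of such a check, placed before the columns are actually dropped (the exact exception type and message depend on the validation added):

       ```scala
       // dropping the same complex column twice in one statement should be rejected
       intercept[ProcessMetaDataException] {
         sql("ALTER TABLE alter_com DROP COLUMNS(arr11, arr11)")
       }
       ```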

##########
File path: integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/DropColumnTestCases.scala
##########
@@ -97,13 +102,127 @@ class DropColumnTestCases extends QueryTest with BeforeAndAfterAll {
     test_drop_and_compaction()
   }
 
-  test("test dropping of complex column should throw exception") {
-    sql("drop table if exists maintbl")
-    sql("create table maintbl (a string, b string, c struct<si:int>) STORED AS carbondata")
-    assert(intercept[ProcessMetaDataException] {
-      sql("alter table maintbl drop columns(b,c)").collect()
-    }.getMessage.contains("Complex column cannot be dropped"))
-    sql("drop table if exists maintbl")
+  def checkSchemaSize(): Unit = {
+    val schema = sql("describe alter_com").collect()
+    assert(schema.size == 1)
+  }
+
+  def droppedColumnsInSchemaEvolutionEntry(tableName: String): Seq[ColumnSchema] = {
+    val carbonTable = CarbonEnv.getCarbonTable(None, tableName)(sqlContext.sparkSession)
+    val schemaEvolutionList = carbonTable.getTableInfo
+      .getFactTable
+      .getSchemaEvolution()
+      .getSchemaEvolutionEntryList()
+    var droppedColumns = Seq[ColumnSchema]()
+    for (i <- 0 until schemaEvolutionList.size()) {
+      droppedColumns ++=
+      JavaConverters
+        .asScalaIteratorConverter(schemaEvolutionList.get(i).getRemoved.iterator())
+        .asScala
+        .toSeq
+    }
+    droppedColumns
+  }
+
+  test("test dropping of array of all primitive types") {

Review comment:
       please also add test cases for dropping a Map complex column
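       A rough sketch of what such a test could look like (table and column names are illustrative):

       ```scala
       test("test dropping of map column") {
         sql("DROP TABLE IF EXISTS alter_map")
         sql("CREATE TABLE alter_map(intField int, map1 map<string,string>) STORED AS carbondata")
         sql("insert into alter_map values(1, map('k1','v1','k2','v2'))")
         sql("ALTER TABLE alter_map DROP COLUMNS(map1)")
         checkAnswer(sql("select * from alter_map"), Seq(Row(1)))
         sql("DROP TABLE IF EXISTS alter_map")
       }
       ```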

##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
##########
@@ -147,19 +143,28 @@ private[sql] case class CarbonAlterTableDropColumnCommand(
         metastore.getThriftTableInfo(carbonTable)
       // maintain the deleted columns for schema evolution history
       var deletedColumnSchema = ListBuffer[org.apache.carbondata.format.ColumnSchema]()
+      var deletedParentColumns = ListBuffer[org.apache.carbondata.format.ColumnSchema]()
       val columnSchemaList = tableInfo.fact_table.table_columns.asScala
       alterTableDropColumnModel.columns.foreach { column =>
         columnSchemaList.foreach { columnSchema =>
-          if (!columnSchema.invisible && column.equalsIgnoreCase(columnSchema.column_name)) {
-            deletedColumnSchema += columnSchema.deepCopy
-            columnSchema.invisible = true
+          if (!columnSchema.invisible) {
+            if (column.equalsIgnoreCase(columnSchema.column_name)) {
+              deletedParentColumns += columnSchema.deepCopy
+              deletedColumnSchema += columnSchema.deepCopy
+              columnSchema.invisible = true
+            }
+            // complex children are prefixed with -> parent_name + '.'
+            if (columnSchema.column_name.contains(CarbonCommonConstants.POINT)) {

Review comment:
       please validate that the child column name starts with "parentname." (not just that it contains a dot), and add a test case with more than one complex column in the table, then drop any one of the complex columns
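       A rough sketch of the prefix check (untested):

       ```scala
       // match children by the "parentname." prefix instead of a bare contains('.') check,
       // so that only children of the dropped parent are marked invisible
       if (columnSchema.column_name.toLowerCase
           .startsWith(column.toLowerCase + CarbonCommonConstants.POINT)) {
         deletedColumnSchema += columnSchema.deepCopy
         columnSchema.invisible = true
       }
       ```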

##########
File path: integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/DropColumnTestCases.scala
##########
@@ -97,13 +102,127 @@ class DropColumnTestCases extends QueryTest with BeforeAndAfterAll {
     test_drop_and_compaction()
   }
 
-  test("test dropping of complex column should throw exception") {
-    sql("drop table if exists maintbl")
-    sql("create table maintbl (a string, b string, c struct<si:int>) STORED AS carbondata")
-    assert(intercept[ProcessMetaDataException] {
-      sql("alter table maintbl drop columns(b,c)").collect()
-    }.getMessage.contains("Complex column cannot be dropped"))
-    sql("drop table if exists maintbl")
+  def checkSchemaSize(): Unit = {
+    val schema = sql("describe alter_com").collect()
+    assert(schema.size == 1)
+  }
+
+  def droppedColumnsInSchemaEvolutionEntry(tableName: String): Seq[ColumnSchema] = {
+    val carbonTable = CarbonEnv.getCarbonTable(None, tableName)(sqlContext.sparkSession)
+    val schemaEvolutionList = carbonTable.getTableInfo
+      .getFactTable
+      .getSchemaEvolution()
+      .getSchemaEvolutionEntryList()
+    var droppedColumns = Seq[ColumnSchema]()
+    for (i <- 0 until schemaEvolutionList.size()) {
+      droppedColumns ++=
+      JavaConverters
+        .asScalaIteratorConverter(schemaEvolutionList.get(i).getRemoved.iterator())
+        .asScala
+        .toSeq
+    }
+    droppedColumns
+  }
+
+  test("test dropping of array of all primitive types") {
+    import scala.collection.mutable.WrappedArray.make
+    sql("DROP TABLE IF EXISTS alter_com")
+    sql("CREATE TABLE alter_com(intfield int, arr1 array<short>, arr2 array<int>, arr3 " +
+        "array<long>, arr4 array<double>, arr5 array<decimal(8,2)>, arr6 array<string>, arr7 " +
+        "array<char(5)>, arr8 array<varchar(50)>, arr9 array<boolean>, arr10 " +
+        "array<date>, arr11 array<timestamp>) STORED AS carbondata")
+    sql(
+      "insert into alter_com values(1,array(1,5),array(1,2),array(1,2,3),array(1.2d,2.3d),array" +
+      "(4.5,6.7),array('hello','world'),array('a','bcd'),array('abcd','efg'),array(true,false)," +
+      "array('2017-02-01','2018-09-11'),array('2017-02-01 00:01:00','2018-02-01 02:21:00') )")
+    sql("ALTER TABLE alter_com DROP COLUMNS(arr1,arr2,arr3,arr4,arr5,arr6) ")
+    sql("ALTER TABLE alter_com DROP COLUMNS(arr7,arr8,arr9) ")
+    sql("ALTER TABLE alter_com DROP COLUMNS(arr10,arr11) ")
+
+    checkSchemaSize
+    checkAnswer(sql("select * from alter_com"), Seq(Row(1)))
+    val droppedColumns = droppedColumnsInSchemaEvolutionEntry("alter_com")
+    assert(droppedColumns.size == 11)
+    // check adding columns with same names again
+    sql(
+      "ALTER TABLE alter_com ADD COLUMNS(arr1 array<short>, arr2 array<int>, arr3 " +
+      "array<long>, arr4 array<double>, arr5 array<decimal(8,2)>, arr6 array<string>, arr7 " +
+      "array<char(5)>, arr8 array<varchar(50)>, arr9 array<boolean>, arr10 array<date>, arr11 " +
+      "array<timestamp> )")
+    val columns = sql("desc table alter_com").collect()
+    assert(columns.size == 12)
+    sql(
+      "insert into alter_com values(2,array(2,5),array(2,2),array(2,2,3),array(2.2d,2.3d),array" +
+      "(2.5,6.7),array('hello2','world'),array('a2','bcd'),array('abcd2','efg'),array(true,false)" +
+      ",array('2017-02-01','2018-09-11'),array('2017-02-01 00:01:00','2018-02-01 02:21:00') )")
+    checkAnswer(sql(
+      "select * from alter_com"),
+      Seq(Row(1, null, null, null, null, null, null, null, null, null, null, null), Row(2,
+        make(Array(2, 5)),
+        make(Array(2, 2)),
+        make(Array(2, 2, 3)),
+        make(Array(2.2, 2.3)),
+        make(Array(java.math.BigDecimal.valueOf(2.5).setScale(2),
+          java.math.BigDecimal.valueOf(6.7).setScale(2))),
+        make(Array("hello2", "world")),
+        make(Array("a2", "bcd")),
+        make(Array("abcd2", "efg")),
+        make(Array(true, false)),
+        make(Array(Date.valueOf("2017-02-01"),
+          Date.valueOf("2018-09-11"))),
+        make(Array(Timestamp.valueOf("2017-02-01 00:01:00"),
+          Timestamp.valueOf("2018-02-01 02:21:00")))
+      )))
+  }
+
+  test("test dropping of struct of all primitive types") {
+    sql("DROP TABLE IF EXISTS alter_com")
+    sql("CREATE TABLE alter_com(intField INT,struct1 struct<a:short,b:int,c:long,d:double," +
+        "e:decimal(8,2),f:string,g:char(5),h:varchar(50),i:boolean,j:date,k:timestamp>) " +
+        "STORED AS carbondata")
+    sql("insert into alter_com values(1, named_struct('a',1,'b',2,'c',3,'d',1.23,'e',2.34,'f'," +
+        "'hello','g','abc','h','def','i',true,'j','2017-02-01','k','2018-02-01 02:00:00.0') ) ")
+    sql("ALTER TABLE alter_com DROP COLUMNS(struct1) ")
+    var columns = sql("desc table alter_com").collect()

Review comment:
       please reuse the checkSchemaSize method
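       i.e. replace the local describe/collect and assert with the shared helper, something like:

       ```scala
       sql("ALTER TABLE alter_com DROP COLUMNS(struct1) ")
       checkSchemaSize()
       ```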




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org