You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by gv...@apache.org on 2016/08/31 16:37:35 UTC

[1/2] incubator-carbondata git commit: Problem: When the number of columns in the csv data file while parsing a row is more than the number of columns in the schema, the parser throws an array index out of bounds exception

Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 1a0d2a672 -> eb12b8517


Problem: When the number of columns in the csv data file while parsing a row is more than the number of columns in the schema, the parser throws an array index out of bounds exception

Reason: The max number of columns in CSVParserSettings is set to the number of columns in the schema plus 10. If the number of columns encountered while parsing is still higher, the univocity parser throws an array index out of bounds exception.

Solution: Configure a higher default for the max number of columns, and use the maximum of the schema column count and the default max-columns value when configuring the univocity parser settings.

Impact: Data load flow


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/11782a38
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/11782a38
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/11782a38

Branch: refs/heads/master
Commit: 11782a3864e92554d2c73b2c7149c8c29efc90c0
Parents: 1a0d2a6
Author: manishgupt88 <to...@gmail.com>
Authored: Wed Aug 31 11:48:53 2016 +0530
Committer: Venkata Ramana G <ra...@huawei.com>
Committed: Wed Aug 31 21:58:10 2016 +0530

----------------------------------------------------------------------
 .../carbondata/spark/load/CarbonLoadModel.java  | 20 ++++++
 .../carbondata/spark/load/CarbonLoaderUtil.java |  1 +
 .../org/apache/spark/sql/CarbonSqlParser.scala  | 13 +++-
 .../execution/command/carbonTableSchema.scala   |  3 +-
 .../src/test/resources/character_carbon.csv     | 33 +++++++++
 .../spark/src/test/resources/character_hive.csv | 32 +++++++++
 .../TestDataLoadWithColumnsMoreThanSchema.scala | 75 ++++++++++++++++++++
 .../api/dataloader/DataLoadModel.java           | 16 +++++
 .../processing/csvreaderstep/CsvInput.java      |  4 ++
 .../processing/csvreaderstep/CsvInputMeta.java  | 15 ++++
 .../csvreaderstep/UnivocityCsvParser.java       | 27 ++++++-
 .../csvreaderstep/UnivocityCsvParserVo.java     | 19 +++++
 .../graphgenerator/GraphGenerator.java          |  6 ++
 13 files changed, 261 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoadModel.java
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoadModel.java b/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoadModel.java
index 283299b..c1a073d 100644
--- a/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoadModel.java
+++ b/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoadModel.java
@@ -100,6 +100,10 @@ public class CarbonLoadModel implements Serializable {
    * defines the string that should be treated as null while loadind data
    */
   private String serializationNullFormat;
+  /**
+   * Max number of columns that needs to be parsed by univocity parser
+   */
+  private String maxColumns;
 
   /**
    * get escape char
@@ -318,6 +322,7 @@ public class CarbonLoadModel implements Serializable {
     copy.segmentId = segmentId;
     copy.serializationNullFormat = serializationNullFormat;
     copy.escapeChar = escapeChar;
+    copy.maxColumns = maxColumns;
     return copy;
   }
 
@@ -356,6 +361,7 @@ public class CarbonLoadModel implements Serializable {
     copyObj.segmentId = segmentId;
     copyObj.serializationNullFormat = serializationNullFormat;
     copyObj.escapeChar = escapeChar;
+    copyObj.maxColumns = maxColumns;
     return copyObj;
   }
 
@@ -524,4 +530,18 @@ public class CarbonLoadModel implements Serializable {
   public void setSerializationNullFormat(String serializationNullFormat) {
     this.serializationNullFormat = serializationNullFormat;
   }
+
+  /**
+   * @return
+   */
+  public String getMaxColumns() {
+    return maxColumns;
+  }
+
+  /**
+   * @param maxColumns
+   */
+  public void setMaxColumns(String maxColumns) {
+    this.maxColumns = maxColumns;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java b/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
index 48f2805..54c8f5e 100644
--- a/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
+++ b/integration/spark/src/main/java/org/apache/carbondata/spark/load/CarbonLoaderUtil.java
@@ -141,6 +141,7 @@ public final class CarbonLoaderUtil {
     model.setEscapeCharacter(schmaModel.getEscapeCharacter());
     model.setTaskNo(loadModel.getTaskNo());
     model.setFactTimeStamp(loadModel.getFactTimeStamp());
+    model.setMaxColumns(loadModel.getMaxColumns());
     boolean hdfsReadMode =
         schmaModel.getCsvFilePath() != null && schmaModel.getCsvFilePath().startsWith("hdfs:");
     int allocate = null != schmaModel.getCsvFilePath() ? 1 : schmaModel.getFilesToProcess().size();

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
index 6556ba2..3bc5f5c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
@@ -997,7 +997,7 @@ class CarbonSqlParser()
     val supportedOptions = Seq("DELIMITER", "QUOTECHAR", "FILEHEADER", "ESCAPECHAR", "MULTILINE",
       "COMPLEX_DELIMITER_LEVEL_1", "COMPLEX_DELIMITER_LEVEL_2", "COLUMNDICT",
       "SERIALIZATION_NULL_FORMAT",
-      "ALL_DICTIONARY_PATH"
+      "ALL_DICTIONARY_PATH", "MAXCOLUMNS"
     )
     var isSupported = true
     val invalidOptions = StringBuilder.newBuilder
@@ -1022,6 +1022,17 @@ class CarbonSqlParser()
       throw new MalformedCarbonCommandException(errorMessage)
     }
 
+    if (options.exists(_._1.equalsIgnoreCase("MAXCOLUMNS"))) {
+      val maxColumns: String = options.get("maxcolumns").get(0)._2
+      try {
+        maxColumns.toInt
+      } catch {
+        case ex: NumberFormatException =>
+          throw new MalformedCarbonCommandException(
+            "option MAXCOLUMNS can only contain integer values")
+      }
+    }
+
     // check for duplicate options
     val duplicateOptions = options filter {
       case (_, optionlist) => optionlist.size > 1

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index 486d54c..a0a9c99 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -1103,7 +1103,8 @@ private[sql] case class LoadTable(
             "load DDL which you set can only be 'true' or 'false', please check your input DDL."
           throw new MalformedCarbonCommandException(errorMessage)
       }
-
+      val maxColumns = partionValues.getOrElse("maxcolumns", null)
+      carbonLoadModel.setMaxColumns(maxColumns)
       carbonLoadModel.setEscapeChar(escapeChar)
       carbonLoadModel.setSerializationNullFormat("serialization_null_format" + "," +
         serializationNullFormat)

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/integration/spark/src/test/resources/character_carbon.csv
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/resources/character_carbon.csv b/integration/spark/src/test/resources/character_carbon.csv
new file mode 100644
index 0000000..2ea7b6c
--- /dev/null
+++ b/integration/spark/src/test/resources/character_carbon.csv
@@ -0,0 +1,33 @@
+imei,age,task,num,level,productdate,mark,name
+\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,1,\u4f60~babu~520
+\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,2,\u4f60`babu`520
+\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,3,\u4f60!babu!520
+\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,4,\u4f60@babu@520
+\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,5,\u4f60#babu#520
+\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,6,\u4f60$babu$520
+\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,7,\u4f60%babu%520
+\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,8,\u4f60^babu^520
+\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,9,\u4f60&babu&520
+\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,10,\u4f60*babu*520
+\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,11,\u4f60(babu(520
+\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,12,\u4f60)babu)520
+\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,13,\u4f60-babu-520
+\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,14,\u4f60_babu_520
+\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,15,\u4f60=babu=520
+\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,16,\u4f60+babu+520
+\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,17,\u4f60{babu{520
+\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,18,\u4f60}babu}520
+\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,19,\u4f60[babu[520
+\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,20,\u4f60]babu]520
+\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,21,\u4f60\babu\520
+\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,22,\u4f60|babu|520
+\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,23,\u4f60;babu;520
+\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,24,\u4f60:babu:520
+\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,25,\u4f60'babu'520
+\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,26,\u4f60"babu"520
+\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,27,\u4f60,babu,520
+\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,28,\u4f60.babu.520
+\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,29,\u4f60<babu<520
+\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,30,\u4f60>babu>520
+\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,31,\u4f60?babu?520
+\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,32,\u4f60/babu/520
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/integration/spark/src/test/resources/character_hive.csv
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/resources/character_hive.csv b/integration/spark/src/test/resources/character_hive.csv
new file mode 100644
index 0000000..4362ca0
--- /dev/null
+++ b/integration/spark/src/test/resources/character_hive.csv
@@ -0,0 +1,32 @@
+\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,\u4f60~babu~520,1,\u4f60~babu~520
+\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,\u4f60`babu`520,2,\u4f60`babu`520
+\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,\u4f60!babu!520,3,\u4f60!babu!520
+\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,\u4f60@babu@520,4,\u4f60@babu@520
+\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,\u4f60#babu#520,5,\u4f60#babu#520
+\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,\u4f60$babu$520,6,\u4f60$babu$520
+\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,\u4f60%babu%520,7,\u4f60%babu%520
+\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,\u4f60^babu^520,8,\u4f60^babu^520
+\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,\u4f60&babu&520,9,\u4f60&babu&520
+\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,\u4f60*babu*520,10,\u4f60*babu*520
+\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,\u4f60(babu(520,11,\u4f60(babu(520
+\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,\u4f60)babu)520,12,\u4f60)babu)520
+\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,\u4f60-babu-520,13,\u4f60-babu-520
+\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,\u4f60_babu_520,14,\u4f60_babu_520
+\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,\u4f60=babu=520,15,\u4f60=babu=520
+\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,\u4f60+babu+520,16,\u4f60+babu+520
+\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,\u4f60{babu{520,17,\u4f60{babu{520
+\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,\u4f60}babu}520,18,\u4f60}babu}520
+\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,\u4f60[babu[520,19,\u4f60[babu[520
+\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,\u4f60]babu]520,20,\u4f60]babu]520
+\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,\u4f60\babu\520,21,\u4f60\babu\520
+\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,\u4f60|babu|520,22,\u4f60|babu|520
+\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,\u4f60;babu;520,23,\u4f60;babu;520
+\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,\u4f60:babu:520,24,\u4f60:babu:520
+\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,\u4f60'babu'520,25,\u4f60'babu'520
+\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,\u4f60"babu"520,26,\u4f60"babu"520
+\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,\u4f60,babu,520,27,\u4f60,babu,520
+\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,\u4f60.babu.520,28,\u4f60.babu.520
+\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,\u4f60<babu<520,29,\u4f60<babu<520
+\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,\u4f60>babu>520,30,\u4f60>babu>520
+\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,\u4f60?babu?520,31,\u4f60?babu?520
+\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,\u4f60/babu/520,32,\u4f60/babu/520
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
new file mode 100644
index 0000000..4755a01
--- /dev/null
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.dataload
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.spark.sql.common.util.CarbonHiveContext._
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * This class will test data load in which number of columns in data are more than
+ * the number of columns in schema
+ */
+class TestDataLoadWithColumnsMoreThanSchema extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("DROP TABLE IF EXISTS char_test")
+    sql("DROP TABLE IF EXISTS hive_char_test")
+    sql("CREATE TABLE char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
+    sql("CREATE TABLE hive_char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)row format delimited fields terminated by ','")
+    sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table char_test")
+    sql("LOAD DATA local inpath './src/test/resources/character_hive.csv' INTO table hive_char_test")
+  }
+
+  test("test count(*) to check for data loss") {
+    checkAnswer(sql("select count(*) from char_test"),
+      sql("select count(*) from hive_char_test"))
+  }
+
+  test("test for invalid value of maxColumns") {
+    sql("DROP TABLE IF EXISTS max_columns_test")
+    sql("CREATE TABLE max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
+    try {
+      sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table max_columns_test options('MAXCOLUMNS'='avfgd')")
+      assert(false)
+    } catch {
+      case _ => assert(true)
+    }
+  }
+
+  test("test for valid value of maxColumns") {
+    sql("DROP TABLE IF EXISTS valid_max_columns_test")
+    sql("CREATE TABLE valid_max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
+    try {
+      sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table valid_max_columns_test options('MAXCOLUMNS'='400')")
+      checkAnswer(sql("select count(*) from valid_max_columns_test"),
+        sql("select count(*) from hive_char_test"))
+    } catch {
+      case _ => assert(false)
+    }
+  }
+
+  override def afterAll {
+    sql("DROP TABLE IF EXISTS char_test")
+    sql("DROP TABLE IF EXISTS hive_char_test")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/processing/src/main/java/org/apache/carbondata/processing/api/dataloader/DataLoadModel.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/api/dataloader/DataLoadModel.java b/processing/src/main/java/org/apache/carbondata/processing/api/dataloader/DataLoadModel.java
index 24812bc..239457b 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/api/dataloader/DataLoadModel.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/api/dataloader/DataLoadModel.java
@@ -60,6 +60,8 @@ public class DataLoadModel {
   private String factTimeStamp;
 
   private String escapeCharacter;
+
+  private String maxColumns;
   /**
    * @return Returns the schemaInfo.
    */
@@ -197,5 +199,19 @@ public class DataLoadModel {
   public void setEscapeCharacter(String escapeCharacter) {
     this.escapeCharacter = escapeCharacter;
   }
+
+  /**
+   * @return
+   */
+  public String getMaxColumns() {
+    return maxColumns;
+  }
+
+  /**
+   * @param maxColumns
+   */
+  public void setMaxColumns(String maxColumns) {
+    this.maxColumns = maxColumns;
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInput.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInput.java b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInput.java
index 3ab8c7f..c01a682 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInput.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInput.java
@@ -454,6 +454,10 @@ public class CsvInput extends BaseStep implements StepInterface {
     csvParserVo.setNumberOfColumns(meta.getInputFields().length);
     csvParserVo.setEscapeCharacter(meta.getEscapeCharacter());
     csvParserVo.setHeaderPresent(meta.isHeaderPresent());
+    String maxColumns = meta.getMaxColumns();
+    if(null != maxColumns) {
+      csvParserVo.setMaxColumns(Integer.parseInt(maxColumns));
+    }
     return csvParserVo;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInputMeta.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInputMeta.java b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInputMeta.java
index 2007f5e..b15862d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInputMeta.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/CsvInputMeta.java
@@ -101,6 +101,8 @@ public class CsvInputMeta extends BaseStepMeta
 
   private String escapeCharacter;
 
+  private String maxColumns;
+
   public CsvInputMeta() {
     super(); // allocate BaseStepMeta
     allocate(0);
@@ -160,6 +162,7 @@ public class CsvInputMeta extends BaseStepMeta
       blocksID = XMLHandler.getTagValue(stepnode, "blocksID");
       partitionID = XMLHandler.getTagValue(stepnode, "partitionID");
       escapeCharacter = XMLHandler.getTagValue(stepnode, "escapeCharacter");
+      maxColumns = XMLHandler.getTagValue(stepnode, "maxColumns");
       Node fields = XMLHandler.getSubNode(stepnode, getXmlCode("FIELDS"));
       int nrfields = XMLHandler.countNodes(fields, getXmlCode("FIELD"));
 
@@ -224,6 +227,7 @@ public class CsvInputMeta extends BaseStepMeta
     retval.append("    ").append(XMLHandler.addTagValue("blocksID", blocksID));
     retval.append("    ").append(XMLHandler.addTagValue("partitionID", partitionID));
     retval.append("    ").append(XMLHandler.addTagValue("escapeCharacter", escapeCharacter));
+    retval.append("    ").append(XMLHandler.addTagValue("maxColumns", maxColumns));
     retval.append("    ").append(XMLHandler.openTag(getXmlCode("FIELDS"))).append(Const.CR);
     for (int i = 0; i < inputFields.length; i++) {
       TextFileInputField field = inputFields[i];
@@ -277,6 +281,7 @@ public class CsvInputMeta extends BaseStepMeta
       blocksID = rep.getStepAttributeString(idStep, getRepCode("blocksID"));
       partitionID = rep.getStepAttributeString(idStep, getRepCode("partitionID"));
       escapeCharacter = rep.getStepAttributeString(idStep, getRepCode("escapeCharacter"));
+      maxColumns = rep.getStepAttributeString(idStep, getRepCode("maxColumns"));
       int nrfields = rep.countNrStepAttributes(idStep, getRepCode("FIELD_NAME"));
 
       allocate(nrfields);
@@ -334,6 +339,8 @@ public class CsvInputMeta extends BaseStepMeta
       rep.saveStepAttribute(idTransformation, idStep, getRepCode("partitionID"), partitionID);
       rep.saveStepAttribute(idTransformation, idStep, getRepCode("escapeCharacter"),
           escapeCharacter);
+      rep.saveStepAttribute(idTransformation, idStep, getRepCode("maxColumns"),
+          maxColumns);
       for (int i = 0; i < inputFields.length; i++) {
         TextFileInputField field = inputFields[i];
 
@@ -934,4 +941,12 @@ public class CsvInputMeta extends BaseStepMeta
   public String getPartitionID() {
     return this.partitionID;
   }
+
+  public String getMaxColumns() {
+    return maxColumns;
+  }
+
+  public void setMaxColumns(String maxColumns) {
+    this.maxColumns = maxColumns;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParser.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParser.java b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParser.java
index 42866c1..630586a 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParser.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParser.java
@@ -41,6 +41,10 @@ import org.apache.hadoop.util.LineReader;
 public class UnivocityCsvParser {
 
   /**
+   * Max number of columns that will be parsed for a row by univocity parsing
+   */
+  private static final int DEFAULT_MAX_NUMBER_OF_COLUMNS_FOR_PARSING = 2000;
+  /**
    * reader for csv
    */
   private Reader inputStreamReader;
@@ -86,7 +90,8 @@ public class UnivocityCsvParser {
     CsvParserSettings parserSettings = new CsvParserSettings();
     parserSettings.getFormat().setDelimiter(csvParserVo.getDelimiter().charAt(0));
     parserSettings.setLineSeparatorDetectionEnabled(true);
-    parserSettings.setMaxColumns(csvParserVo.getNumberOfColumns() + 10);
+    parserSettings.setMaxColumns(
+        getMaxColumnsForParsing(csvParserVo.getNumberOfColumns(), csvParserVo.getMaxColumns()));
     parserSettings.setNullValue("");
     parserSettings.setIgnoreLeadingWhitespaces(false);
     parserSettings.setIgnoreTrailingWhitespaces(false);
@@ -104,6 +109,26 @@ public class UnivocityCsvParser {
   }
 
   /**
+   * This method will decide the number of columns to be parsed for a row by univocity parser
+   *
+   * @param columnCountInSchema total number of columns in schema
+   * @return
+   */
+  private int getMaxColumnsForParsing(int columnCountInSchema, int maxColumns) {
+    int maxNumberOfColumnsForParsing = DEFAULT_MAX_NUMBER_OF_COLUMNS_FOR_PARSING;
+    if (maxColumns > 0) {
+      if (columnCountInSchema > maxColumns) {
+        maxNumberOfColumnsForParsing = columnCountInSchema + 10;
+      } else {
+        maxNumberOfColumnsForParsing = maxColumns;
+      }
+    } else if (columnCountInSchema > DEFAULT_MAX_NUMBER_OF_COLUMNS_FOR_PARSING) {
+      maxNumberOfColumnsForParsing = columnCountInSchema + 10;
+    }
+    return maxNumberOfColumnsForParsing;
+  }
+
+  /**
    * Below method will be used to initialize the reader
    *
    * @throws IOException

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParserVo.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParserVo.java b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParserVo.java
index 041bfd0..623cac3 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParserVo.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/csvreaderstep/UnivocityCsvParserVo.java
@@ -71,6 +71,11 @@ public class UnivocityCsvParserVo {
   private String escapeCharacter;
 
   /**
+   * max number of columns configured by user to be parsed in a row
+   */
+  private int maxColumns;
+
+  /**
    * @return the delimiter
    */
   public String getDelimiter() {
@@ -181,4 +186,18 @@ public class UnivocityCsvParserVo {
   public void setEscapeCharacter(String escapeCharacter) {
     this.escapeCharacter = escapeCharacter;
   }
+
+  /**
+   * @return
+   */
+  public int getMaxColumns() {
+    return maxColumns;
+  }
+
+  /**
+   * @param maxColumns
+   */
+  public void setMaxColumns(int maxColumns) {
+    this.maxColumns = maxColumns;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/11782a38/processing/src/main/java/org/apache/carbondata/processing/graphgenerator/GraphGenerator.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/graphgenerator/GraphGenerator.java b/processing/src/main/java/org/apache/carbondata/processing/graphgenerator/GraphGenerator.java
index b5e1682..10ea710 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/graphgenerator/GraphGenerator.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/graphgenerator/GraphGenerator.java
@@ -199,6 +199,10 @@ public class GraphGenerator {
    * new load start time
    */
   private String factTimeStamp;
+  /**
+   * max number of columns configured by user to be parsed in a row
+   */
+  private String maxColumns;
 
   public GraphGenerator(DataLoadModel dataLoadModel, boolean isHDFSReadMode, String partitionID,
       String factStoreLocation, int currentRestructNum, int allocate,
@@ -221,6 +225,7 @@ public class GraphGenerator {
     this.factTimeStamp = dataLoadModel.getFactTimeStamp();
     this.segmentId = segmentId;
     this.escapeCharacter = dataLoadModel.getEscapeCharacter();
+    this.maxColumns = dataLoadModel.getMaxColumns();
     initialise();
     LOGGER.info("************* Is Columnar Storage" + isColumnar);
   }
@@ -436,6 +441,7 @@ public class GraphGenerator {
     csvInputMeta.setEncoding("UTF-8");
     csvInputMeta.setEnclosure("\"");
     csvInputMeta.setHeaderPresent(true);
+    csvInputMeta.setMaxColumns(maxColumns);
     csvInputMeta.setCurrentRestructNumber(graphConfiguration.getCurrentRestructNumber());
     StepMeta csvDataStep =
         new StepMeta(GraphGeneratorConstants.CSV_INPUT, (StepMetaInterface) csvInputMeta);


[2/2] incubator-carbondata git commit: [CARBONDATA-194] Support MAXCOLUMNS option while dataload. This closes #111

Posted by gv...@apache.org.
[CARBONDATA-194] Support MAXCOLUMNS option while dataload. This closes #111


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/eb12b851
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/eb12b851
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/eb12b851

Branch: refs/heads/master
Commit: eb12b85173188bf985167f959474c5fb461ada34
Parents: 1a0d2a6 11782a3
Author: Venkata Ramana G <ra...@huawei.com>
Authored: Wed Aug 31 22:06:45 2016 +0530
Committer: Venkata Ramana G <ra...@huawei.com>
Committed: Wed Aug 31 22:06:45 2016 +0530

----------------------------------------------------------------------
 .../carbondata/spark/load/CarbonLoadModel.java  | 20 ++++++
 .../carbondata/spark/load/CarbonLoaderUtil.java |  1 +
 .../org/apache/spark/sql/CarbonSqlParser.scala  | 13 +++-
 .../execution/command/carbonTableSchema.scala   |  3 +-
 .../src/test/resources/character_carbon.csv     | 33 +++++++++
 .../spark/src/test/resources/character_hive.csv | 32 +++++++++
 .../TestDataLoadWithColumnsMoreThanSchema.scala | 75 ++++++++++++++++++++
 .../api/dataloader/DataLoadModel.java           | 16 +++++
 .../processing/csvreaderstep/CsvInput.java      |  4 ++
 .../processing/csvreaderstep/CsvInputMeta.java  | 15 ++++
 .../csvreaderstep/UnivocityCsvParser.java       | 27 ++++++-
 .../csvreaderstep/UnivocityCsvParserVo.java     | 19 +++++
 .../graphgenerator/GraphGenerator.java          |  6 ++
 13 files changed, 261 insertions(+), 3 deletions(-)
----------------------------------------------------------------------